diff --git bin/ext/hbaseimport.sh bin/ext/hbaseimport.sh
deleted file mode 100644
index 638cdcf..0000000
--- bin/ext/hbaseimport.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-THISSERVICE=hbaseimport
-export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} "
-
-hbaseimport () {
-  CLASS=org.apache.hadoop.hive.metastore.hbase.HBaseImport
-  HIVE_OPTS=''
-  execHiveCmd $CLASS "$@"
-}
-
-hbaseimport_help () {
-  echo "usage ./hive hbaseimport"
-}
diff --git bin/ext/hbaseschematool.sh bin/ext/hbaseschematool.sh
deleted file mode 100644
index 4d4570a..0000000
--- bin/ext/hbaseschematool.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-THISSERVICE=hbaseschematool
-export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} "
-
-hbaseschematool () {
-  CLASS=org.apache.hadoop.hive.metastore.hbase.HBaseSchemaTool
-  HIVE_OPTS=''
-  execHiveCmd $CLASS "$@"
-}
-
-hbaseschematool_help () {
-  echo "usage ./hive hbaseschematool [-d ] "
-}
diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 14235b5..62a65c9 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -291,21 +291,6 @@ private static URL checkConfigFile(File f) {
       HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL,
       HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL,
       HiveConf.ConfVars.METASTORE_FASTPATH,
-      HiveConf.ConfVars.METASTORE_HBASE_CATALOG_CACHE_SIZE,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_SIZE,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_FALSE_POSITIVE_PROBABILITY,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_VARIANCE,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_TIME_TO_LIVE,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_WRITER_WAIT,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_READER_WAIT,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_FULL,
-      HiveConf.ConfVars.METASTORE_HBASE_CACHE_CLEAN_UNTIL,
-      HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_MEMORY_TTL,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY,
-      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_HBASE_TTL,
       HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS
       };
@@ -622,43 +607,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal
         "undefined and most likely undesired behavior will result"),
     METASTORE_FS_HANDLER_THREADS_COUNT("hive.metastore.fshandler.threads", 15,
         "Number of threads to be allocated for metastore handler for fs operations."),
-    METASTORE_HBASE_CATALOG_CACHE_SIZE("hive.metastore.hbase.catalog.cache.size", 50000, "Maximum number of " +
-        "objects we will place in the hbase metastore catalog cache. The objects will be divided up by " +
-        "types that we need to cache."),
-    METASTORE_HBASE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.hbase.aggregate.stats.cache.size", 10000,
-        "Maximum number of aggregate stats nodes that we will place in the hbase metastore aggregate stats cache."),
-    METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.hbase.aggregate.stats.max.partitions", 10000,
-        "Maximum number of partitions that are aggregated per cache node."),
-    METASTORE_HBASE_AGGREGATE_STATS_CACHE_FALSE_POSITIVE_PROBABILITY("hive.metastore.hbase.aggregate.stats.false.positive.probability",
-        (float) 0.01, "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
-    METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.hbase.aggregate.stats.max.variance", (float) 0.1,
-        "Maximum tolerable variance in number of partitions between a cached node and our request (default 10%)."),
-    METASTORE_HBASE_CACHE_TIME_TO_LIVE("hive.metastore.hbase.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds for a cached node to be active in the cache before they become stale."),
-    METASTORE_HBASE_CACHE_MAX_WRITER_WAIT("hive.metastore.hbase.cache.max.writer.wait", "5000ms", new TimeValidator(TimeUnit.MILLISECONDS),
-        "Number of milliseconds a writer will wait to acquire the writelock before giving up."),
-    METASTORE_HBASE_CACHE_MAX_READER_WAIT("hive.metastore.hbase.cache.max.reader.wait", "1000ms", new TimeValidator(TimeUnit.MILLISECONDS),
-        "Number of milliseconds a reader will wait to acquire the readlock before giving up."),
-    METASTORE_HBASE_CACHE_MAX_FULL("hive.metastore.hbase.cache.max.full", (float) 0.9,
-        "Maximum cache full % after which the cache cleaner thread kicks in."),
-    METASTORE_HBASE_CACHE_CLEAN_UNTIL("hive.metastore.hbase.cache.clean.until", (float) 0.8,
-        "The cleaner thread cleans until cache reaches this % full size."),
-    METASTORE_HBASE_CONNECTION_CLASS("hive.metastore.hbase.connection.class",
-        "org.apache.hadoop.hive.metastore.hbase.VanillaHBaseConnection",
-        "Class used to connection to HBase"),
-    METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES("hive.metastore.hbase.aggr.stats.cache.entries",
-        10000, "How many in stats objects to cache in memory"),
-    METASTORE_HBASE_AGGR_STATS_MEMORY_TTL("hive.metastore.hbase.aggr.stats.memory.ttl", "60s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds stats objects live in memory after they are read from HBase."),
-    METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY(
-        "hive.metastore.hbase.aggr.stats.invalidator.frequency", "5s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "How often the stats cache scans its HBase entries and looks for expired entries"),
-    METASTORE_HBASE_AGGR_STATS_HBASE_TTL("hive.metastore.hbase.aggr.stats.hbase.ttl", "604800s",
-        new TimeValidator(TimeUnit.SECONDS),
-        "Number of seconds stats entries live in HBase cache after they are created. They may be" +
-        " invalided by updates or partition drops before this.
Default is one week."), METASTORE_HBASE_FILE_METADATA_THREADS("hive.metastore.hbase.file.metadata.threads", 1, "Number of threads to use to read file metadata in background to cache it."), diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/HBaseIntegrationTests.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/HBaseIntegrationTests.java deleted file mode 100644 index d4cd818..0000000 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/HBaseIntegrationTests.java +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hive.cli.CliSessionState; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.Driver; -import org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator; -import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -import org.apache.hadoop.hive.ql.session.SessionState; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -/** - * Integration tests with HBase Mini-cluster for HBaseStore - */ -public class HBaseIntegrationTests { - - private static final Logger LOG = LoggerFactory.getLogger(HBaseIntegrationTests.class.getName()); - - protected static HBaseTestingUtility utility; - protected static HBaseAdmin admin; - protected static Map emptyParameters = new HashMap<>(); - protected static HiveConf conf; - - protected HBaseStore store; - protected Driver driver; - - protected static void startMiniCluster() throws Exception { - String connectionClassName = - System.getProperty(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS.varname); - boolean testingTephra = - connectionClassName != null && connectionClassName.equals(TephraHBaseConnection.class.getName()); - if (testingTephra) { - LOG.info("Testing with Tephra"); - } - Configuration hbaseConf = HBaseConfiguration.create(); - hbaseConf.setInt("hbase.master.info.port", -1); - utility = new HBaseTestingUtility(hbaseConf); - utility.startMiniCluster(); - conf = new HiveConf(utility.getConfiguration(), HBaseIntegrationTests.class); - admin = utility.getHBaseAdmin(); - HBaseStoreTestUtil.initHBaseMetastore(admin, null); - } - - protected static void shutdownMiniCluster() throws Exception { - utility.shutdownMiniCluster(); - } - - protected void setupConnection() throws IOException { - - } - - protected void setupDriver() { - // 
This chicanery is necessary to make the driver work. Hive tests need the pfile file - // system, while the hbase one uses something else. So first make sure we've configured our - // hbase connection, then get a new config file and populate it as desired. - HBaseReadWrite.setConf(conf); - conf = new HiveConf(); - conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); - conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, - "org.apache.hadoop.hive.metastore.hbase.HBaseStore"); - conf.setBoolVar(HiveConf.ConfVars.METASTORE_FASTPATH, true); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - // Setup so we can test SQL standard auth - conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE, true); - conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, - SQLStdHiveAuthorizerFactoryForTest.class.getName()); - conf.setVar(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER, - SessionStateConfigUserAuthenticator.class.getName()); - conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true); - conf.setVar(HiveConf.ConfVars.USERS_IN_ADMIN_ROLE, System.getProperty("user.name")); - conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE,"nonstrict"); - //HBaseReadWrite.setTestConnection(hconn); - - SessionState.start(new CliSessionState(conf)); - driver = new Driver(conf); - } - - protected void setupHBaseStore() { - // Turn off caching, as we want to test actual interaction with HBase - conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); - store = new HBaseStore(); - store.setConf(conf); - } - -} - diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java deleted file mode 100644 index 51d96dd..0000000 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java +++ /dev/null @@ -1,691 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Table; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -/** - * Integration tests with HBase Mini-cluster for HBaseStore - */ -public class TestHBaseAggrStatsCacheIntegration extends HBaseIntegrationTests { - - private static final Logger LOG = LoggerFactory.getLogger(TestHBaseStoreIntegration.class.getName()); - - @Rule public ExpectedException thrown = ExpectedException.none(); - - @BeforeClass - public static void startup() throws Exception { - HBaseIntegrationTests.startMiniCluster(); - } - - @AfterClass - public static void shutdown() throws Exception { - HBaseIntegrationTests.shutdownMiniCluster(); - } - - @Before - public void setup() throws IOException { - setupConnection(); - setupHBaseStore(); - store.backdoor().getStatsCache().resetCounters(); - } - - private static interface Checker { - void checkStats(AggrStats aggrStats) throws Exception; - } - - @Test - public void hit() throws Exception { - String dbName = "default"; - String tableName = "hit"; - List partVals1 = Arrays.asList("today"); - List partVals2 = Arrays.asList("yesterday"); - long now = System.currentTimeMillis(); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "boolean", "nocomment")); - cols.add(new FieldSchema("col2", "varchar", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections.emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); - store.createTable(table); - - for (List partVals : Arrays.asList(partVals1, partVals2)) { - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVals.get(0)); - Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, - Collections.emptyMap()); - store.addPartition(part); - - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVals.get(0)); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - 
obj.setColName("col1"); - obj.setColType("boolean"); - ColumnStatisticsData data = new ColumnStatisticsData(); - BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); - bcsd.setNumFalses(10); - bcsd.setNumTrues(20); - bcsd.setNumNulls(30); - data.setBooleanStats(bcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - - obj = new ColumnStatisticsObj(); - obj.setColName("col2"); - obj.setColType("varchar"); - data = new ColumnStatisticsData(); - StringColumnStatsData scsd = new StringColumnStatsData(); - scsd.setAvgColLen(10.3); - scsd.setMaxColLen(2000); - scsd.setNumNulls(3); - scsd.setNumDVs(12342); - data.setStringStats(scsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - - store.updatePartitionColumnStatistics(cs, partVals); - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(2, aggrStats.getPartsFound()); - Assert.assertEquals(2, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col1", cso.getColName()); - Assert.assertEquals("boolean", cso.getColType()); - BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); - Assert.assertEquals(20, bcsd.getNumFalses()); - Assert.assertEquals(40, bcsd.getNumTrues()); - Assert.assertEquals(60, bcsd.getNumNulls()); - - cso = aggrStats.getColStats().get(1); - Assert.assertEquals("col2", cso.getColName()); - Assert.assertEquals("varchar", cso.getColType()); - StringColumnStatsData scsd = cso.getStatsData().getStringStats(); - Assert.assertEquals(10.3, scsd.getAvgColLen(), 0.1); - Assert.assertEquals(2000, scsd.getMaxColLen()); - Assert.assertEquals(6, scsd.getNumNulls()); - Assert.assertEquals(12342, scsd.getNumDVs()); - } - }; - - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1", "col2")); - statChecker.checkStats(aggrStats); - - // Check that we had to build it from the stats - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); - - // Call again, this time it should come from memory. Also, reverse the name order this time - // to assure that we still hit. 
- aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1", "col2")); - statChecker.checkStats(aggrStats); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(4, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); - - store.backdoor().getStatsCache().flushMemory(); - // Call again, this time it should come from hbase - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1", "col2")); - statChecker.checkStats(aggrStats); - - Assert.assertEquals(2, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(6, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); - } - - @Test - public void someWithStats() throws Exception { - String dbName = "default"; - String tableName = "psws"; - List partVals1 = Arrays.asList("today"); - List partVals2 = Arrays.asList("yesterday"); - long now = System.currentTimeMillis(); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "long", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections.emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); - store.createTable(table); - - boolean first = true; - for (List partVals : Arrays.asList(partVals1, partVals2)) { - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/psws/ds=" + partVals.get(0)); - Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, - Collections.emptyMap()); - store.addPartition(part); - - if (first) { - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVals.get(0)); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1"); - obj.setColType("long"); - ColumnStatisticsData data = new ColumnStatisticsData(); - LongColumnStatsData lcsd = new LongColumnStatsData(); - lcsd.setHighValue(192L); - lcsd.setLowValue(-20L); - lcsd.setNumNulls(30); - lcsd.setNumDVs(32); - data.setLongStats(lcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - - store.updatePartitionColumnStatistics(cs, partVals); - first = false; - } - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(1, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col1", cso.getColName()); - Assert.assertEquals("long", cso.getColType()); - LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); - Assert.assertEquals(192L, lcsd.getHighValue()); - Assert.assertEquals(-20L, lcsd.getLowValue()); - Assert.assertEquals(30, lcsd.getNumNulls()); - Assert.assertEquals(32, lcsd.getNumDVs()); - } - }; - - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", 
"ds=yesterday"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - // Check that we had to build it from the stats - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); - - // Call again, this time it should come from memory. Also, reverse the name order this time - // to assure that we still hit. - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); - - store.backdoor().getStatsCache().flushMemory(); - // Call again, this time it should come from hbase - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - Assert.assertEquals(1, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(3, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); - } - - @Test - public void invalidation() throws Exception { - try { - String dbName = "default"; - String tableName = "invalidation"; - List partVals1 = Arrays.asList("today"); - List partVals2 = Arrays.asList("yesterday"); - List partVals3 = Arrays.asList("tomorrow"); - long now = System.currentTimeMillis(); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "boolean", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections.emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); - store.createTable(table); - - for (List partVals : Arrays.asList(partVals1, partVals2, partVals3)) { - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/invalidation/ds=" + partVals.get(0)); - Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, - Collections.emptyMap()); - store.addPartition(part); - - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVals.get(0)); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1"); - obj.setColType("boolean"); - ColumnStatisticsData data = new ColumnStatisticsData(); - BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); - bcsd.setNumFalses(10); - bcsd.setNumTrues(20); - bcsd.setNumNulls(30); - data.setBooleanStats(bcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - - store.updatePartitionColumnStatistics(cs, partVals); - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(2, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = 
aggrStats.getColStats().get(0); - Assert.assertEquals("col1", cso.getColName()); - Assert.assertEquals("boolean", cso.getColType()); - BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); - Assert.assertEquals(20, bcsd.getNumFalses()); - Assert.assertEquals(40, bcsd.getNumTrues()); - Assert.assertEquals(60, bcsd.getNumNulls()); - } - }; - - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - // Check that we had to build it from the stats - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); - - // Call again, this time it should come from memory. Also, reverse the name order this time - // to assure that we still hit. - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); - - // Now call a different combination to get it in memory too - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(3, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); - - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(4, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); - - // wake the invalidator and check again to make sure it isn't too aggressive about - // removing our stuff. 
- store.backdoor().getStatsCache().wakeInvalidator(); - - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(5, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); - - // Update statistics for 'tomorrow' - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVals3.get(0)); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1"); - obj.setColType("boolean"); - ColumnStatisticsData data = new ColumnStatisticsData(); - BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); - bcsd.setNumFalses(100); - bcsd.setNumTrues(200); - bcsd.setNumNulls(300); - data.setBooleanStats(bcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - - Checker afterUpdate = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(2, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col1", cso.getColName()); - Assert.assertEquals("boolean", cso.getColType()); - BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); - Assert.assertEquals(110, bcsd.getNumFalses()); - Assert.assertEquals(220, bcsd.getNumTrues()); - Assert.assertEquals(330, bcsd.getNumNulls()); - } - }; - - store.updatePartitionColumnStatistics(cs, partVals3); - - store.backdoor().getStatsCache().setRunInvalidatorEvery(100); - store.backdoor().getStatsCache().wakeInvalidator(); - - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); - afterUpdate.checkStats(aggrStats); - - // Check that we missed, which means this aggregate was dropped from the cache. - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(6, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(3, store.backdoor().getStatsCache().misses.getCnt()); - - // Check that our other aggregate is still in the cache. 
- aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(7, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(3, store.backdoor().getStatsCache().misses.getCnt()); - - // Drop 'yesterday', so our first aggregate should be dumped from memory and hbase - store.dropPartition(dbName, tableName, partVals2); - - store.backdoor().getStatsCache().wakeInvalidator(); - - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); - new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(1, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col1", cso.getColName()); - Assert.assertEquals("boolean", cso.getColType()); - BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); - Assert.assertEquals(10, bcsd.getNumFalses()); - Assert.assertEquals(20, bcsd.getNumTrues()); - Assert.assertEquals(30, bcsd.getNumNulls()); - } - }.checkStats(aggrStats); - - // Check that we missed, which means this aggregate was dropped from the cache. - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(8, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(4, store.backdoor().getStatsCache().misses.getCnt()); - - // Check that our other aggregate is still in the cache. - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); - afterUpdate.checkStats(aggrStats); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(9, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(4, store.backdoor().getStatsCache().misses.getCnt()); - } finally { - store.backdoor().getStatsCache().setRunInvalidatorEvery(5000); - store.backdoor().getStatsCache().setMaxTimeInCache(500000); - store.backdoor().getStatsCache().wakeInvalidator(); - } - } - - @Test - public void alterInvalidation() throws Exception { - try { - String dbName = "default"; - String tableName = "ai"; - List partVals1 = Arrays.asList("today"); - List partVals2 = Arrays.asList("yesterday"); - List partVals3 = Arrays.asList("tomorrow"); - long now = System.currentTimeMillis(); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "boolean", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections.emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); - store.createTable(table); - - Partition[] partitions = new Partition[3]; - int partnum = 0; - for (List partVals : Arrays.asList(partVals1, partVals2, partVals3)) { - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/invalidation/ds=" + partVals.get(0)); - Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, - Collections.emptyMap()); - 
partitions[partnum++] = part; - store.addPartition(part); - - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVals.get(0)); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1"); - obj.setColType("boolean"); - ColumnStatisticsData data = new ColumnStatisticsData(); - BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); - bcsd.setNumFalses(10); - bcsd.setNumTrues(20); - bcsd.setNumNulls(30); - data.setBooleanStats(bcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - - store.updatePartitionColumnStatistics(cs, partVals); - } - - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=tomorrow"), Arrays.asList("col1")); - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); - - // Check that we had to build it from the stats - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); - - // wake the invalidator and check again to make sure it isn't too aggressive about - // removing our stuff. - store.backdoor().getStatsCache().wakeInvalidator(); - - Partition newPart = new Partition(partitions[2]); - newPart.setLastAccessTime((int)System.currentTimeMillis()); - store.alterPartition(dbName, tableName, partVals3, newPart); - - store.backdoor().getStatsCache().setRunInvalidatorEvery(100); - store.backdoor().getStatsCache().wakeInvalidator(); - - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); - - // Check that we missed, which means this aggregate was dropped from the cache. - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(3, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(3, store.backdoor().getStatsCache().misses.getCnt()); - - // Check that our other aggregate is still in the cache. 
- aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(4, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(3, store.backdoor().getStatsCache().misses.getCnt()); - } finally { - store.backdoor().getStatsCache().setRunInvalidatorEvery(5000); - store.backdoor().getStatsCache().setMaxTimeInCache(500000); - store.backdoor().getStatsCache().wakeInvalidator(); - } - } - - @Test - public void altersInvalidation() throws Exception { - try { - String dbName = "default"; - String tableName = "asi"; - List partVals1 = Arrays.asList("today"); - List partVals2 = Arrays.asList("yesterday"); - List partVals3 = Arrays.asList("tomorrow"); - long now = System.currentTimeMillis(); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "boolean", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections.emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); - store.createTable(table); - - Partition[] partitions = new Partition[3]; - int partnum = 0; - for (List partVals : Arrays.asList(partVals1, partVals2, partVals3)) { - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/invalidation/ds=" + partVals.get(0)); - Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, - Collections.emptyMap()); - partitions[partnum++] = part; - store.addPartition(part); - - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVals.get(0)); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1"); - obj.setColType("boolean"); - ColumnStatisticsData data = new ColumnStatisticsData(); - BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); - bcsd.setNumFalses(10); - bcsd.setNumTrues(20); - bcsd.setNumNulls(30); - data.setBooleanStats(bcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - - store.updatePartitionColumnStatistics(cs, partVals); - } - - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=tomorrow"), Arrays.asList("col1")); - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); - - // Check that we had to build it from the stats - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); - - // wake the invalidator and check again to make sure it isn't too aggressive about - // removing our stuff. 
- store.backdoor().getStatsCache().wakeInvalidator(); - - Partition[] newParts = new Partition[2]; - newParts[0] = new Partition(partitions[0]); - newParts[0].setLastAccessTime((int)System.currentTimeMillis()); - newParts[1] = new Partition(partitions[2]); - newParts[1].setLastAccessTime((int) System.currentTimeMillis()); - store.alterPartitions(dbName, tableName, Arrays.asList(partVals1, partVals3), - Arrays.asList(newParts)); - - store.backdoor().getStatsCache().setRunInvalidatorEvery(100); - store.backdoor().getStatsCache().wakeInvalidator(); - - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); - - // Check that we missed, which means this aggregate was dropped from the cache. - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(3, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(3, store.backdoor().getStatsCache().misses.getCnt()); - - // Check that our other aggregate got dropped too - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(4, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(4, store.backdoor().getStatsCache().misses.getCnt()); - } finally { - store.backdoor().getStatsCache().setRunInvalidatorEvery(5000); - store.backdoor().getStatsCache().setMaxTimeInCache(500000); - store.backdoor().getStatsCache().wakeInvalidator(); - } - } -} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java deleted file mode 100644 index b1d3174..0000000 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java +++ /dev/null @@ -1,775 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.metastore.ObjectStore; -import org.apache.hadoop.hive.metastore.RawStore; -import org.apache.hadoop.hive.metastore.TestObjectStore; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.FunctionType; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.ResourceType; -import org.apache.hadoop.hive.metastore.api.ResourceUri; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -/** - * Test that import from an RDBMS based metastore works - */ -public class TestHBaseImport extends HBaseIntegrationTests { - - private static final Logger LOG = LoggerFactory.getLogger(TestHBaseImport.class.getName()); - - private static final String[] tableNames = new String[] {"allnonparttable", "allparttable"}; - private static final String[] partVals = new String[] {"na", "emea", "latam", "apac"}; - private static final String[] funcNames = new String[] {"allfunc1", "allfunc2"}; - private static final String[] indexNames = new String[] {"allindex1", "allindex2"}; - private static final String[] pkNames = new String[] {"allnonparttable_pk", "allparttable_pk"}; - private static final String[] fkNames = new String[] {"", "allparttable_fk"}; - - private static final List masterKeySeqs = new ArrayList(); - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @BeforeClass - public static void startup() throws Exception { - HBaseIntegrationTests.startMiniCluster(); - RawStore rdbms; - rdbms = new ObjectStore(); - rdbms.setConf(conf); - TestObjectStore.dropAllStoreObjects(rdbms); - } - - @AfterClass - public static void shutdown() throws Exception { - RawStore rdbms; - rdbms = new ObjectStore(); - rdbms.setConf(conf); - TestObjectStore.dropAllStoreObjects(rdbms); - for (int seq : masterKeySeqs) { - rdbms.removeMasterKey(seq); - } - HBaseIntegrationTests.shutdownMiniCluster(); - } - - @Before - public void setup() throws IOException { - setupConnection(); - setupHBaseStore(); - } - - @Test - public void importAll() throws Exception { - RawStore rdbms; - rdbms = new ObjectStore(); - rdbms.setConf(conf); - - String[] dbNames = new String[] {"alldb1", "alldb2"}; - String[] roles = new String[] {"allrole1", "allrole2"}; - String[] tokenIds = new String[] {"alltokenid1", 
"alltokenid2"}; - String[] tokens = new String[] {"alltoken1", "alltoken2"}; - String[] masterKeys = new String[] {"allmk1", "allmk2"}; - int now = (int)System.currentTimeMillis() / 1000; - - setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); - - int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size(); - int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); - - HBaseImport importer = new HBaseImport("-a"); - importer.setConnections(rdbms, store); - importer.run(); - - for (int i = 0; i < roles.length; i++) { - Role role = store.getRole(roles[i]); - Assert.assertNotNull(role); - Assert.assertEquals(roles[i], role.getRoleName()); - } - // Make sure there aren't any extra roles - Assert.assertEquals(baseNumRoles + 2, store.listRoleNames().size()); - - for (int i = 0; i < dbNames.length; i++) { - Database db = store.getDatabase(dbNames[i]); - Assert.assertNotNull(db); - // check one random value in the db rather than every value - Assert.assertEquals("file:/tmp", db.getLocationUri()); - - Table table = store.getTable(db.getName(), tableNames[0]); - Assert.assertNotNull(table); - Assert.assertEquals(now, table.getLastAccessTime()); - Assert.assertEquals("input", table.getSd().getInputFormat()); - - table = store.getTable(db.getName(), tableNames[1]); - Assert.assertNotNull(table); - - for (int j = 0; j < partVals.length; j++) { - Partition part = store.getPartition(dbNames[i], tableNames[1], Arrays.asList(partVals[j])); - Assert.assertNotNull(part); - Assert.assertEquals("file:/tmp/region=" + partVals[j], part.getSd().getLocation()); - } - - Assert.assertEquals(4, store.getPartitions(dbNames[i], tableNames[1], -1).size()); - // Including two index table - Assert.assertEquals(4, store.getAllTables(dbNames[i]).size()); - - Assert.assertEquals(2, store.getIndexes(dbNames[i], tableNames[0], -1).size()); - Assert.assertEquals(0, store.getIndexes(dbNames[i], tableNames[1], -1).size()); - - Assert.assertEquals(2, store.getFunctions(dbNames[i], "*").size()); - for (int j = 0; j < funcNames.length; j++) { - Assert.assertNotNull(store.getFunction(dbNames[i], funcNames[j])); - } - } - - Assert.assertEquals(baseNumDbs + 2, store.getAllDatabases().size()); - - // I can't test total number of tokens or master keys because the import grabs all and copies - // them, which means it grabs the ones imported by importSecurity test (if it's already run). - // Depending on it already running would make the tests order dependent, which junit doesn't - // guarantee. - for (int i = 0; i < tokenIds.length; i++) { - Assert.assertEquals(tokens[i], store.getToken(tokenIds[i])); - } - String[] hbaseKeys = store.getMasterKeys(); - Set keys = new HashSet<>(Arrays.asList(hbaseKeys)); - for (int i = 0; i < masterKeys.length; i++) { - Assert.assertTrue(keys.contains(masterKeys[i])); - } - } - - @Test - public void importOneDb() throws Exception { - RawStore rdbms; - rdbms = new ObjectStore(); - rdbms.setConf(conf); - - String[] dbNames = new String[] {"onedbdb1", "onedbdb2"}; - String[] roles = new String[] {"onedbrole1", "onedbrole2"}; - String[] tokenIds = new String[] {"onedbtokenid1", "onedbtokenid2"}; - String[] tokens = new String[] {"onedbtoken1", "onedbtoken2"}; - String[] masterKeys = new String[] {"onedbmk1", "onedbmk2"}; - int now = (int)System.currentTimeMillis() / 1000; - - setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); - - int baseNumRoles = store.listRoleNames() == null ? 
0 : store.listRoleNames().size(); - int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); - int baseNumToks = store.getAllTokenIdentifiers() == null ? 0 : - store.getAllTokenIdentifiers().size(); - int baseNumKeys = store.getMasterKeys() == null ? 0 : store.getMasterKeys().length; - - HBaseImport importer = new HBaseImport("-d", dbNames[0]); - importer.setConnections(rdbms, store); - importer.run(); - - // Make sure there aren't any extra roles - Assert.assertEquals(baseNumRoles, store.listRoleNames().size()); - - Database db = store.getDatabase(dbNames[0]); - Assert.assertNotNull(db); - // check one random value in the db rather than every value - Assert.assertEquals("file:/tmp", db.getLocationUri()); - - Table table = store.getTable(db.getName(), tableNames[0]); - Assert.assertNotNull(table); - Assert.assertEquals(now, table.getLastAccessTime()); - Assert.assertEquals("input", table.getSd().getInputFormat()); - - table = store.getTable(db.getName(), tableNames[1]); - Assert.assertNotNull(table); - - for (int j = 0; j < partVals.length; j++) { - Partition part = store.getPartition(dbNames[0], tableNames[1], Arrays.asList(partVals[j])); - Assert.assertNotNull(part); - Assert.assertEquals("file:/tmp/region=" + partVals[j], part.getSd().getLocation()); - } - - Assert.assertEquals(4, store.getPartitions(dbNames[0], tableNames[1], -1).size()); - // Including two index table - Assert.assertEquals(4, store.getAllTables(dbNames[0]).size()); - - Assert.assertEquals(2, store.getIndexes(dbNames[0], tableNames[0], -1).size()); - Assert.assertEquals(0, store.getIndexes(dbNames[0], tableNames[1], -1).size()); - - Assert.assertEquals(2, store.getFunctions(dbNames[0], "*").size()); - for (int j = 0; j < funcNames.length; j++) { - Assert.assertNotNull(store.getFunction(dbNames[0], funcNames[j])); - } - - Assert.assertEquals(baseNumDbs + 1, store.getAllDatabases().size()); - - Assert.assertEquals(baseNumToks, store.getAllTokenIdentifiers().size()); - String[] hbaseKeys = store.getMasterKeys(); - Assert.assertEquals(baseNumKeys, hbaseKeys.length); - - // Have to do this last as it will throw an exception - thrown.expect(NoSuchObjectException.class); - store.getDatabase(dbNames[1]); - } - - @Test - public void importOneFunc() throws Exception { - RawStore rdbms; - rdbms = new ObjectStore(); - rdbms.setConf(conf); - - String[] dbNames = new String[] {"onefuncdb1", "onefuncdb2"}; - String[] roles = new String[] {"onefuncrole1", "onefuncrole2"}; - String[] tokenIds = new String[] {"onefunctokenid1", "onefunctokenid2"}; - String[] tokens = new String[] {"onefunctoken1", "onefunctoken2"}; - String[] masterKeys = new String[] {"onefuncmk1", "onefuncmk2"}; - int now = (int)System.currentTimeMillis() / 1000; - - setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); - - int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size(); - int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); - int baseNumToks = store.getAllTokenIdentifiers() == null ? 0 : - store.getAllTokenIdentifiers().size(); - int baseNumKeys = store.getMasterKeys() == null ? 0 : store.getMasterKeys().length; - - // Create the database so I can put the function in it. - store.createDatabase( - new Database(dbNames[0], "no description", "file:/tmp", emptyParameters)); - - HBaseImport importer = new HBaseImport("-f", dbNames[0] + "." 
+ funcNames[0]); - importer.setConnections(rdbms, store); - importer.run(); - - // Make sure there aren't any extra roles - Assert.assertEquals(baseNumRoles, store.listRoleNames().size()); - - Database db = store.getDatabase(dbNames[0]); - Assert.assertNotNull(db); - - Assert.assertEquals(0, store.getAllTables(dbNames[0]).size()); - Assert.assertEquals(1, store.getFunctions(dbNames[0], "*").size()); - Assert.assertNotNull(store.getFunction(dbNames[0], funcNames[0])); - Assert.assertNull(store.getFunction(dbNames[0], funcNames[1])); - - Assert.assertEquals(baseNumDbs + 1, store.getAllDatabases().size()); - - Assert.assertEquals(baseNumToks, store.getAllTokenIdentifiers().size()); - String[] hbaseKeys = store.getMasterKeys(); - Assert.assertEquals(baseNumKeys, hbaseKeys.length); - } - - @Test - public void importOneTableNonPartitioned() throws Exception { - RawStore rdbms; - rdbms = new ObjectStore(); - rdbms.setConf(conf); - - String[] dbNames = new String[] {"onetabdb1", "onetabdb2"}; - String[] roles = new String[] {"onetabrole1", "onetabrole2"}; - String[] tokenIds = new String[] {"onetabtokenid1", "onetabtokenid2"}; - String[] tokens = new String[] {"onetabtoken1", "onetabtoken2"}; - String[] masterKeys = new String[] {"onetabmk1", "onetabmk2"}; - int now = (int)System.currentTimeMillis() / 1000; - - setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); - - int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size(); - int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); - int baseNumToks = store.getAllTokenIdentifiers() == null ? 0 : - store.getAllTokenIdentifiers().size(); - int baseNumKeys = store.getMasterKeys() == null ? 0 : store.getMasterKeys().length; - - // Create the database so I can put the table in it. - store.createDatabase( - new Database(dbNames[0], "no description", "file:/tmp", emptyParameters)); - - HBaseImport importer = new HBaseImport("-t", dbNames[0] + "." + tableNames[0]); - importer.setConnections(rdbms, store); - importer.run(); - - // Make sure there aren't any extra roles - Assert.assertEquals(baseNumRoles, store.listRoleNames().size()); - - Database db = store.getDatabase(dbNames[0]); - Assert.assertNotNull(db); - - Table table = store.getTable(db.getName(), tableNames[0]); - Assert.assertNotNull(table); - Assert.assertEquals(1, store.getAllTables(db.getName()).size()); - Assert.assertNull(store.getTable(db.getName(), tableNames[1])); - - List indexes = store.getIndexes(db.getName(), tableNames[0], -1); - Assert.assertEquals(2, indexes.size()); - - Assert.assertEquals(0, store.getFunctions(dbNames[0], "*").size()); - Assert.assertEquals(baseNumDbs + 1, store.getAllDatabases().size()); - - Assert.assertEquals(baseNumToks, store.getAllTokenIdentifiers().size()); - String[] hbaseKeys = store.getMasterKeys(); - Assert.assertEquals(baseNumKeys, hbaseKeys.length); - } - - @Test - public void importTablesWithConstraints() throws Exception { - RawStore rdbms; - rdbms = new ObjectStore(); - rdbms.setConf(conf); - - String[] dbNames = new String[] {"onetabwcdb1", "onetabwcdb2"}; - int now = (int)System.currentTimeMillis() / 1000; - - setupObjectStore(rdbms, dbNames, now, true); - - // Create the database so I can put the table in it. 
- store.createDatabase( - new Database(dbNames[0], "no description", "file:/tmp", emptyParameters)); - - HBaseImport importer = new HBaseImport("-d", dbNames[0]); - importer.setConnections(rdbms, store); - importer.run(); - - Database db = store.getDatabase(dbNames[0]); - Assert.assertNotNull(db); - - Table table = store.getTable(db.getName(), tableNames[1]); - Assert.assertNotNull(table); - - List pk = store.getPrimaryKeys(dbNames[0], tableNames[1]); - Assert.assertNotNull(pk); - Assert.assertEquals(1, pk.size()); - Assert.assertEquals(dbNames[0], pk.get(0).getTable_db()); - Assert.assertEquals(tableNames[1], pk.get(0).getTable_name()); - Assert.assertEquals(0, pk.get(0).getKey_seq()); - Assert.assertEquals("col1", pk.get(0).getColumn_name()); - Assert.assertEquals(dbNames[0] + "_" + pkNames[1], pk.get(0).getPk_name()); - Assert.assertTrue(pk.get(0).isEnable_cstr()); - Assert.assertFalse(pk.get(0).isValidate_cstr()); - Assert.assertTrue(pk.get(0).isRely_cstr()); - - List fk = - store.getForeignKeys(dbNames[0], tableNames[0], dbNames[0], tableNames[1]); - Assert.assertNotNull(fk); - Assert.assertEquals(1, fk.size()); - Assert.assertEquals(dbNames[0], fk.get(0).getPktable_db()); - Assert.assertEquals(tableNames[0], fk.get(0).getPktable_name()); - Assert.assertEquals("col1", fk.get(0).getPkcolumn_name()); - Assert.assertEquals(dbNames[0], fk.get(0).getFktable_db()); - Assert.assertEquals(tableNames[1], fk.get(0).getFktable_name()); - Assert.assertEquals("col1", fk.get(0).getFkcolumn_name()); - Assert.assertEquals(0, fk.get(0).getKey_seq()); - Assert.assertEquals(1, fk.get(0).getUpdate_rule()); - Assert.assertEquals(2, fk.get(0).getDelete_rule()); - Assert.assertEquals(dbNames[0] + "_" + fkNames[1], fk.get(0).getFk_name()); - Assert.assertTrue(pk.get(0).isEnable_cstr()); - Assert.assertFalse(pk.get(0).isValidate_cstr()); - Assert.assertTrue(pk.get(0).isRely_cstr()); - - - } - - @Test - public void importOneTablePartitioned() throws Exception { - RawStore rdbms; - rdbms = new ObjectStore(); - rdbms.setConf(conf); - - String[] dbNames = new String[] {"onetabpartdb1", "onetabpartodb2"}; - String[] roles = new String[] {"onetabpartorole1", "onetabpartorole2"}; - String[] tokenIds = new String[] {"onetabpartotokenid1", "onetabpartotokenid2"}; - String[] tokens = new String[] {"onetabpartotoken1", "onetabpartotoken2"}; - String[] masterKeys = new String[] {"onetabpartomk1", "onetabpartomk2"}; - int now = (int)System.currentTimeMillis() / 1000; - - setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); - - int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size(); - int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); - int baseNumToks = store.getAllTokenIdentifiers() == null ? 0 : - store.getAllTokenIdentifiers().size(); - int baseNumKeys = store.getMasterKeys() == null ? 0 : store.getMasterKeys().length; - - // Create the database so I can put the table in it. - store.createDatabase( - new Database(dbNames[0], "no description", "file:/tmp", emptyParameters)); - - HBaseImport importer = new HBaseImport("-t", dbNames[0] + "." 
+ tableNames[1]); - importer.setConnections(rdbms, store); - importer.run(); - - // Make sure there aren't any extra roles - Assert.assertEquals(baseNumRoles, store.listRoleNames().size()); - - Database db = store.getDatabase(dbNames[0]); - Assert.assertNotNull(db); - - Table table = store.getTable(db.getName(), tableNames[1]); - Assert.assertNotNull(table); - Assert.assertEquals(1, store.getAllTables(db.getName()).size()); - - for (int j = 0; j < partVals.length; j++) { - Partition part = store.getPartition(dbNames[0], tableNames[1], Arrays.asList(partVals[j])); - Assert.assertNotNull(part); - Assert.assertEquals("file:/tmp/region=" + partVals[j], part.getSd().getLocation()); - } - Assert.assertEquals(4, store.getPartitions(dbNames[0], tableNames[1], -1).size()); - - Assert.assertNull(store.getTable(db.getName(), tableNames[0])); - - List indexes = store.getIndexes(db.getName(), tableNames[1], -1); - Assert.assertEquals(0, indexes.size()); - - Assert.assertEquals(0, store.getFunctions(dbNames[0], "*").size()); - Assert.assertEquals(baseNumDbs + 1, store.getAllDatabases().size()); - - Assert.assertEquals(baseNumToks, store.getAllTokenIdentifiers().size()); - String[] hbaseKeys = store.getMasterKeys(); - Assert.assertEquals(baseNumKeys, hbaseKeys.length); - } - - @Test - public void importSecurity() throws Exception { - RawStore rdbms; - rdbms = new ObjectStore(); - rdbms.setConf(conf); - - String[] dbNames = new String[] {"securitydb1", "securitydb2"}; - String[] roles = new String[] {"securityrole1", "securityrole2"}; - String[] tokenIds = new String[] {"securitytokenid1", "securitytokenid2"}; - String[] tokens = new String[] {"securitytoken1", "securitytoken2"}; - String[] masterKeys = new String[] {"securitymk1", "securitymk2"}; - int now = (int)System.currentTimeMillis() / 1000; - - setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); - - int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size(); - int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); - - HBaseImport importer = new HBaseImport("-k"); - importer.setConnections(rdbms, store); - importer.run(); - - Assert.assertEquals(baseNumRoles, store.listRoleNames().size()); - - Assert.assertEquals(baseNumDbs, store.getAllDatabases().size()); - - // I can't test total number of tokens or master keys because the import grabs all and copies - // them, which means it grabs the ones imported by importAll test (if it's already run). - // Depending on it already running would make the tests order dependent, which junit doesn't - // guarantee. 
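/* Editor's sketch (illustrative only, not part of this patch): the invocation pattern shared by the
 * import tests above. "rdbms" is the JDO-backed source RawStore and "store" the HBase-backed
 * destination, both provided by the deleted test fixture; the selector flag chooses what gets
 * copied (-d database, -t db.table, -f db.function, -r role, -k security tokens and keys).
 * "somedb" is a placeholder name. */
HBaseImport importer = new HBaseImport("-d", "somedb");   // copy one whole database
importer.setConnections(rdbms, store);                    // order used in these tests: source, then destination
importer.run();
Assert.assertNotNull(store.getDatabase("somedb"));        // verify the copy landed in the HBase-backed store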
- for (int i = 0; i < tokenIds.length; i++) { - Assert.assertEquals(tokens[i], store.getToken(tokenIds[i])); - } - String[] hbaseKeys = store.getMasterKeys(); - Set keys = new HashSet<>(Arrays.asList(hbaseKeys)); - for (int i = 0; i < masterKeys.length; i++) { - Assert.assertTrue(keys.contains(masterKeys[i])); - } - } - - // TODO test for bogus function name - // TODO test for bogus table name - // TODO test for non-existent items - - @Test - public void importOneRole() throws Exception { - RawStore rdbms; - rdbms = new ObjectStore(); - rdbms.setConf(conf); - - String[] dbNames = new String[] {"oneroledb1", "oneroledb2"}; - String[] roles = new String[] {"onerolerole1", "onerolerole2"}; - String[] tokenIds = new String[] {"oneroletokenid1", "oneroletokenid2"}; - String[] tokens = new String[] {"oneroletoken1", "oneroletoken2"}; - String[] masterKeys = new String[] {"onerolemk1", "onerolemk2"}; - int now = (int)System.currentTimeMillis() / 1000; - - setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); - - int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size(); - int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); - int baseNumToks = store.getAllTokenIdentifiers() == null ? 0 : - store.getAllTokenIdentifiers().size(); - int baseNumKeys = store.getMasterKeys() == null ? 0 : store.getMasterKeys().length; - - HBaseImport importer = new HBaseImport("-r", roles[0]); - importer.setConnections(rdbms, store); - importer.run(); - - Role role = store.getRole(roles[0]); - Assert.assertNotNull(role); - Assert.assertEquals(roles[0], role.getRoleName()); - - // Make sure there aren't any extra roles - Assert.assertEquals(baseNumRoles + 1, store.listRoleNames().size()); - Assert.assertEquals(baseNumDbs, store.getAllDatabases().size()); - - Assert.assertEquals(baseNumToks, store.getAllTokenIdentifiers().size()); - String[] hbaseKeys = store.getMasterKeys(); - Assert.assertEquals(baseNumKeys, hbaseKeys.length); - - // Have to do this last as it will throw an exception - thrown.expect(NoSuchObjectException.class); - store.getRole(roles[1]); - } - - private void setupObjectStore(RawStore rdbms, String[] roles, String[] dbNames, - String[] tokenIds, String[] tokens, String[] masterKeys, int now) - throws MetaException, InvalidObjectException, NoSuchObjectException { - setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now, false); - } - - private void setupObjectStore(RawStore rdbms, String[] dbNames, int now, - boolean putConstraintsOnTables) - throws MetaException, InvalidObjectException, NoSuchObjectException { - setupObjectStore(rdbms, null, dbNames, null, null, null, now, putConstraintsOnTables); - } - - private void setupObjectStore(RawStore rdbms, String[] roles, String[] dbNames, - String[] tokenIds, String[] tokens, String[] masterKeys, int now, - boolean putConstraintsOnTables) - throws MetaException, InvalidObjectException, NoSuchObjectException { - if (roles != null) { - for (int i = 0; i < roles.length; i++) { - rdbms.addRole(roles[i], "me"); - } - } - - for (int i = 0; i < dbNames.length; i++) { - rdbms.createDatabase( - new Database(dbNames[i], "no description", "file:/tmp", emptyParameters)); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - 
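/* Editor's note (not part of this patch): what follows builds, per database, the fixture the
 * import assertions above depend on: an unpartitioned table, a table partitioned by "region"
 * with one partition per entry in partVals, optional primary/foreign key constraints, the
 * functions in funcNames, and one index table plus index per entry in indexNames on the first
 * table; that is why importAll expects 4 tables (2 base plus 2 index) and 2 indexes per database. */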
rdbms.createTable(new Table(tableNames[0], dbNames[i], "me", now, now, 0, sd, null, - emptyParameters, null, null, null)); - if (putConstraintsOnTables) { - rdbms.addPrimaryKeys(Collections.singletonList( - new SQLPrimaryKey(dbNames[i], tableNames[0], "col1", 0, dbNames[i] + "_" + pkNames[0], - true, false, true) - )); - } - - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("region", "string", "")); - rdbms.createTable(new Table(tableNames[1], dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); - if (putConstraintsOnTables) { - rdbms.addPrimaryKeys(Arrays.asList( - new SQLPrimaryKey(dbNames[i], tableNames[1], "col1", 0, dbNames[i] + "_" + pkNames[1], - true, false, true) - )); - rdbms.addForeignKeys(Collections.singletonList( - new SQLForeignKey(dbNames[i], tableNames[0], "col1", dbNames[i], tableNames[1], - "col1", 0, 1, 2, dbNames[i] + "_" + fkNames[1], dbNames[i] + "_" + pkNames[0], - true, false, true) - )); - - } - - for (int j = 0; j < partVals.length; j++) { - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/region=" + partVals[j]); - Partition part = new Partition(Arrays.asList(partVals[j]), dbNames[i], tableNames[1], - now, now, psd, emptyParameters); - rdbms.addPartition(part); - } - - for (String funcName : funcNames) { - LOG.debug("Creating new function " + dbNames[i] + "." + funcName); - rdbms.createFunction(new Function(funcName, dbNames[i], "classname", "ownername", - PrincipalType.USER, (int) System.currentTimeMillis() / 1000, FunctionType.JAVA, - Arrays.asList(new ResourceUri(ResourceType.JAR, "uri")))); - } - - for (String indexName : indexNames) { - LOG.debug("Creating new index " + dbNames[i] + "." + tableNames[0] + "." + indexName); - String indexTableName = tableNames[0] + "__" + indexName + "__"; - rdbms.createTable(new Table(indexTableName, dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); - rdbms.addIndex(new Index(indexName, null, dbNames[i], tableNames[0], - now, now, indexTableName, sd, emptyParameters, false)); - } - } - if (tokenIds != null) { - for (int i = 0; i < tokenIds.length; i++) rdbms.addToken(tokenIds[i], tokens[i]); - } - if (masterKeys != null) { - for (int i = 0; i < masterKeys.length; i++) { - masterKeySeqs.add(rdbms.addMasterKey(masterKeys[i])); - } - } - } - - @Test - public void parallel() throws Exception { - int parallelFactor = 10; - RawStore rdbms; - rdbms = new ObjectStore(); - rdbms.setConf(conf); - - String[] dbNames = new String[] {"paralleldb1"}; - int now = (int)System.currentTimeMillis() / 1000; - - for (int i = 0; i < dbNames.length; i++) { - rdbms.createDatabase( - new Database(dbNames[i], "no description", "file:/tmp", emptyParameters)); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("region", "string", "")); - for (int j = 0; j < parallelFactor; j++) { - rdbms.createTable(new Table("t" + j, dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); - for (int k = 0; k < parallelFactor; k++) { - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/region=" + k); - Partition part = new Partition(Arrays.asList("p" + k), dbNames[i], "t" + j, - now, now, psd, 
emptyParameters); - rdbms.addPartition(part); - } - } - } - - HBaseImport importer = new HBaseImport("-p", "2", "-b", "2", "-d", dbNames[0]); - importer.setConnections(rdbms, store); - importer.run(); - - for (int i = 0; i < dbNames.length; i++) { - Database db = store.getDatabase(dbNames[i]); - Assert.assertNotNull(db); - - for (int j = 0; j < parallelFactor; j++) { - Table table = store.getTable(db.getName(), "t" + j); - Assert.assertNotNull(table); - Assert.assertEquals(now, table.getLastAccessTime()); - Assert.assertEquals("input", table.getSd().getInputFormat()); - - for (int k = 0; k < parallelFactor; k++) { - Partition part = - store.getPartition(dbNames[i], "t" + j, Arrays.asList("p" + k)); - Assert.assertNotNull(part); - Assert.assertEquals("file:/tmp/region=" + k, part.getSd().getLocation()); - } - - Assert.assertEquals(parallelFactor, store.getPartitions(dbNames[i], "t" + j, -1).size()); - } - Assert.assertEquals(parallelFactor, store.getAllTables(dbNames[i]).size()); - - } - } - - // Same as the test above except we create 9 of everything instead of 10. This is important - // because in using a batch size of 2 the previous test guarantees 10 /2 =5 , meaning we'll - // have 5 writes on the partition queue with exactly 2 entries. In this test we'll handle the - // case where the last entry in the queue has fewer partitions. - @Test - public void parallelOdd() throws Exception { - int parallelFactor = 9; - RawStore rdbms; - rdbms = new ObjectStore(); - rdbms.setConf(conf); - - String[] dbNames = new String[] {"oddparalleldb1"}; - int now = (int)System.currentTimeMillis() / 1000; - - for (int i = 0; i < dbNames.length; i++) { - rdbms.createDatabase( - new Database(dbNames[i], "no description", "file:/tmp", emptyParameters)); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("region", "string", "")); - for (int j = 0; j < parallelFactor; j++) { - rdbms.createTable(new Table("t" + j, dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); - for (int k = 0; k < parallelFactor; k++) { - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/region=" + k); - Partition part = new Partition(Arrays.asList("p" + k), dbNames[i], "t" + j, - now, now, psd, emptyParameters); - rdbms.addPartition(part); - } - } - } - - HBaseImport importer = new HBaseImport("-p", "2", "-b", "2", "-d", dbNames[0]); - importer.setConnections(rdbms, store); - importer.run(); - - for (int i = 0; i < dbNames.length; i++) { - Database db = store.getDatabase(dbNames[i]); - Assert.assertNotNull(db); - - for (int j = 0; j < parallelFactor; j++) { - Table table = store.getTable(db.getName(), "t" + j); - Assert.assertNotNull(table); - Assert.assertEquals(now, table.getLastAccessTime()); - Assert.assertEquals("input", table.getSd().getInputFormat()); - - for (int k = 0; k < parallelFactor; k++) { - Partition part = - store.getPartition(dbNames[i], "t" + j, Arrays.asList("p" + k)); - Assert.assertNotNull(part); - Assert.assertEquals("file:/tmp/region=" + k, part.getSd().getLocation()); - } - - Assert.assertEquals(parallelFactor, store.getPartitions(dbNames[i], "t" + j, -1).size()); - } - Assert.assertEquals(parallelFactor, store.getAllTables(dbNames[i]).size()); - - } - 
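/* Editor's aside (not part of this patch): the arithmetic behind the parallel/parallelOdd pair.
 * With a batch size of 2, the 10-partition test enqueues 10 / 2 = 5 full batches, while the
 * 9-partition test enqueues 4 full batches plus a final batch of 1, exercising the short
 * last-batch case described in the comment above. */
int partitions = 9, batchSize = 2;
int fullBatches = partitions / batchSize;   // 4 batches with exactly 2 partitions each
int lastBatch = partitions % batchSize;     // 1 partition left for the final, shorter batch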
} -} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java deleted file mode 100644 index aefafe0..0000000 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java +++ /dev/null @@ -1,130 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.hadoop.hive.cli.CliSessionState; -import org.apache.hadoop.hive.common.metrics.MetricsTestUtils; -import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; -import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; -import org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics; -import org.apache.hadoop.hive.common.metrics.metrics2.MetricsReporting; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaStore; -import org.apache.hadoop.hive.metastore.ObjectStore; -import org.apache.hadoop.hive.ql.Driver; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; - -/** - * Test HMS Metrics on HBase Metastore - */ -public class TestHBaseMetastoreMetrics extends HBaseIntegrationTests { - - @BeforeClass - public static void startup() throws Exception { - HBaseIntegrationTests.startMiniCluster(); - } - - @AfterClass - public static void shutdown() throws Exception { - HBaseIntegrationTests.shutdownMiniCluster(); - } - - @Before - public void before() throws IOException { - HBaseReadWrite.setConf(conf); - conf = new HiveConf(); - conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, - "org.apache.hadoop.hive.metastore.hbase.HBaseStore"); - conf.setBoolVar(HiveConf.ConfVars.METASTORE_FASTPATH, true); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - - conf.setBoolVar(HiveConf.ConfVars.METASTORE_METRICS, true); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, MetricsReporting.JSON_FILE.name() + "," + MetricsReporting.JMX.name()); - SessionState.start(new CliSessionState(conf)); - driver = new Driver(conf); - } - - @Test - public void testMetaDataCounts() throws Exception { - //1 databases created - driver.run("create database testdb1"); - - //4 tables - driver.run("create table testtbl1 (key string)"); - driver.run("create table testtblpart (key string) partitioned by (partkey string)"); - driver.run("use testdb1"); - driver.run("create table testtbl2 (key string)"); - driver.run("create table testtblpart2 (key string) partitioned by (partkey string)"); - - //6 partitions - driver.run("alter 
table default.testtblpart add partition (partkey='a')"); - driver.run("alter table default.testtblpart add partition (partkey='b')"); - driver.run("alter table default.testtblpart add partition (partkey='c')"); - driver.run("alter table testdb1.testtblpart2 add partition (partkey='a')"); - driver.run("alter table testdb1.testtblpart2 add partition (partkey='b')"); - driver.run("alter table testdb1.testtblpart2 add partition (partkey='c')"); - - - //create and drop some additional metadata, to test drop counts. - driver.run("create database tempdb"); - driver.run("use tempdb"); - - driver.run("create table delete_by_table (key string) partitioned by (partkey string)"); - driver.run("alter table delete_by_table add partition (partkey='temp')"); - driver.run("drop table delete_by_table"); - - driver.run("create table delete_by_part (key string) partitioned by (partkey string)"); - driver.run("alter table delete_by_part add partition (partkey='temp')"); - driver.run("alter table delete_by_part drop partition (partkey='temp')"); - - driver.run("create table delete_by_db (key string) partitioned by (partkey string)"); - driver.run("alter table delete_by_db add partition (partkey='temp')"); - driver.run("use default"); - driver.run("drop database tempdb cascade"); - - CodahaleMetrics metrics = (CodahaleMetrics) MetricsFactory.getInstance(); - String json = metrics.dumpJson(); - MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_DATABASES, 2); - MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_TABLES, 7); - MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_PARTITIONS, 9); - - MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.DELETE_TOTAL_DATABASES, 1); - MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.DELETE_TOTAL_TABLES, 3); - MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.DELETE_TOTAL_PARTITIONS, 3); - - - //to test initial metadata count metrics. - conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, ObjectStore.class.getName()); - HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("test", conf, false); - baseHandler.init(); - baseHandler.updateMetrics(); - - //1 new db + default - json = metrics.dumpJson(); - MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.GAUGE, MetricsConstant.INIT_TOTAL_DATABASES, 2); - MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.GAUGE, MetricsConstant.INIT_TOTAL_TABLES, 4); - MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.GAUGE, MetricsConstant.INIT_TOTAL_PARTITIONS, 6); - } -} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java deleted file mode 100644 index d4966b9..0000000 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java +++ /dev/null @@ -1,223 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; - -/** - * Integration tests with HBase Mini-cluster using actual SQL - */ -public class TestHBaseMetastoreSql extends HBaseIntegrationTests { - - private static final Logger LOG = LoggerFactory.getLogger(TestHBaseStoreIntegration.class.getName()); - - @BeforeClass - public static void startup() throws Exception { - HBaseIntegrationTests.startMiniCluster(); - - } - - @AfterClass - public static void shutdown() throws Exception { - HBaseIntegrationTests.shutdownMiniCluster(); - } - - @Before - public void before() throws IOException { - setupConnection(); - setupDriver(); - } - - @Test - public void insertIntoTable() throws Exception { - driver.run("create table iit (c int)"); - CommandProcessorResponse rsp = driver.run("insert into table iit values (3)"); - Assert.assertEquals(0, rsp.getResponseCode()); - } - - @Test - public void insertIntoPartitionTable() throws Exception { - driver.run("create table iipt (c int) partitioned by (ds string)"); - CommandProcessorResponse rsp = - driver.run("insert into table iipt partition(ds) values (1, 'today'), (2, 'yesterday')," + - "(3, 'tomorrow')"); - Assert.assertEquals(0, rsp.getResponseCode()); - } - - @Test - public void database() throws Exception { - CommandProcessorResponse rsp = driver.run("create database db"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("set role admin"); - Assert.assertEquals(0, rsp.getResponseCode()); - // security doesn't let me change the properties - rsp = driver.run("alter database db set dbproperties ('key' = 'value')"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("drop database db"); - Assert.assertEquals(0, rsp.getResponseCode()); - } - - @Test - public void table() throws Exception { - driver.run("create table tbl (c int)"); - CommandProcessorResponse rsp = driver.run("insert into table tbl values (3)"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("select * from tbl"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("alter table tbl set tblproperties ('example', 'true')"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("drop table tbl"); - Assert.assertEquals(0, rsp.getResponseCode()); - } - - @Test - public void partitionedTable() throws Exception { - driver.run("create table parttbl (c int) partitioned by (ds string)"); - CommandProcessorResponse rsp = - driver.run("insert into table parttbl partition(ds) values (1, 'today'), (2, 'yesterday')" + - ", (3, 'tomorrow')"); - Assert.assertEquals(0, rsp.getResponseCode()); - // Do it again, to check insert into existing partitions - rsp = driver.run("insert into table parttbl partition(ds) values (4, 'today'), (5, 'yesterday')" - + ", (6, 'tomorrow')"); - Assert.assertEquals(0, 
rsp.getResponseCode()); - rsp = driver.run("insert into table parttbl partition(ds = 'someday') values (1)"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("insert into table parttbl partition(ds = 'someday') values (2)"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("alter table parttbl add partition (ds = 'whenever')"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("insert into table parttbl partition(ds = 'whenever') values (2)"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("alter table parttbl touch partition (ds = 'whenever')"); - Assert.assertEquals(0, rsp.getResponseCode()); - // TODO - Can't do this until getPartitionsByExpr implemented - /* - rsp = driver.run("alter table parttbl drop partition (ds = 'whenever')"); - Assert.assertEquals(0, rsp.getResponseCode()); - */ - rsp = driver.run("select * from parttbl"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("select * from parttbl where ds = 'today'"); - Assert.assertEquals(0, rsp.getResponseCode()); - } - - @Test - public void role() throws Exception { - CommandProcessorResponse rsp = driver.run("set role admin"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("create role role1"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("grant role1 to user fred with admin option"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("create role role2"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("grant role1 to role role2"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("show principals role1"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("show role grant role role1"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("show role grant user " + System.getProperty("user.name")); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("show roles"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("revoke admin option for role1 from user fred"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("revoke role1 from user fred"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("revoke role1 from role role2"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("show current roles"); - Assert.assertEquals(0, rsp.getResponseCode()); - - rsp = driver.run("drop role role2"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("drop role role1"); - Assert.assertEquals(0, rsp.getResponseCode()); - } - - @Test - public void grant() throws Exception { - CommandProcessorResponse rsp = driver.run("set role admin"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("create role role3"); - Assert.assertEquals(0, rsp.getResponseCode()); - driver.run("create table granttbl (c int)"); - Assert.assertEquals(0, rsp.getResponseCode()); - driver.run("grant select on granttbl to " + System.getProperty("user.name")); - Assert.assertEquals(0, rsp.getResponseCode()); - driver.run("grant select on granttbl to role3 with grant option"); - Assert.assertEquals(0, rsp.getResponseCode()); - driver.run("revoke select on granttbl from " + System.getProperty("user.name")); - Assert.assertEquals(0, rsp.getResponseCode()); - driver.run("revoke grant option for select on granttbl from role3"); - Assert.assertEquals(0, rsp.getResponseCode()); - } - - @Test - public void describeNonpartitionedTable() throws 
Exception { - CommandProcessorResponse rsp = driver.run("create table alter1(a int, b int)"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("describe extended alter1"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("alter table alter1 set serdeproperties('s1'='9')"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("describe extended alter1"); - Assert.assertEquals(0, rsp.getResponseCode()); - } - - @Test - public void alterRenamePartitioned() throws Exception { - driver.run("create table alterrename (c int) partitioned by (ds string)"); - driver.run("alter table alterrename add partition (ds = 'a')"); - CommandProcessorResponse rsp = driver.run("describe extended alterrename partition (ds='a')"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("alter table alterrename rename to alter_renamed"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("describe extended alter_renamed partition (ds='a')"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("describe extended alterrename partition (ds='a')"); - Assert.assertEquals(10001, rsp.getResponseCode()); - } - - @Test - public void alterRename() throws Exception { - driver.run("create table alterrename1 (c int)"); - CommandProcessorResponse rsp = driver.run("describe alterrename1"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("alter table alterrename1 rename to alter_renamed1"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("describe alter_renamed1"); - Assert.assertEquals(0, rsp.getResponseCode()); - rsp = driver.run("describe alterrename1"); - Assert.assertEquals(10001, rsp.getResponseCode()); - } - - -} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java deleted file mode 100644 index c98911a..0000000 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java +++ /dev/null @@ -1,582 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.FunctionType; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Table; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.PrintStream; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -public class TestHBaseSchemaTool extends HBaseIntegrationTests { - - private final String lsep = System.getProperty("line.separator"); - - @BeforeClass - public static void startup() throws Exception { - HBaseIntegrationTests.startMiniCluster(); - } - - @AfterClass - public static void shutdown() throws Exception { - HBaseIntegrationTests.shutdownMiniCluster(); - } - - @Before - public void setup() throws IOException { - setupHBaseStore(); - } - - @Test - public void listTables() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(true, null, null, null, conf, out, err); - Assert.assertEquals(StringUtils.join(HBaseReadWrite.tableNames, lsep) + lsep, - outStr.toString()); - } - - @Test - public void bogusTable() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, "nosuch", null, null, conf, out, err); - Assert.assertEquals("Unknown table: nosuch" + lsep, errStr.toString()); - } - - @Test - public void noSuchDb() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.DB_TABLE, "nosuch", null, conf, out, err); - Assert.assertEquals("No such database: nosuch" + lsep, outStr.toString()); - } - - @Test - public void noMatchingDb() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - 
PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.DB_TABLE, null, "nomatch", conf, out, err); - Assert.assertEquals("No matching database: nomatch" + lsep, outStr.toString()); - } - - @Test - public void noSuchRole() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.ROLE_TABLE, "nosuch", null, conf, out, err); - Assert.assertEquals("No such role: nosuch" + lsep, outStr.toString()); - } - - @Test - public void noMatchingRole() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.ROLE_TABLE, null, "nomatch", conf, out, err); - Assert.assertEquals("No matching role: nomatch" + lsep, outStr.toString()); - } - - @Test - public void noSuchUser() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.USER_TO_ROLE_TABLE, "nosuch", null, conf, out, err); - Assert.assertEquals("No such user: nosuch" + lsep, outStr.toString()); - } - - @Test - public void noMatchingUser() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.USER_TO_ROLE_TABLE, null, "nomatch", conf, out, err); - Assert.assertEquals("No matching user: nomatch" + lsep, outStr.toString()); - } - - @Test - public void noSuchFunction() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.FUNC_TABLE, "nosuch", null, conf, out, err); - Assert.assertEquals("No such function: nosuch" + lsep, outStr.toString()); - } - - @Test - public void noMatchingFunction() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.FUNC_TABLE, null, "nomatch", conf, out, - err); - Assert.assertEquals("No matching function: nomatch" + lsep, outStr.toString()); - } - - @Test - public void noSuchTable() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.TABLE_TABLE, "nosuch", null, conf, out, err); - Assert.assertEquals("No such table: nosuch" + lsep, outStr.toString()); - } - - @Test - public void noMatchingTable() throws 
Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.TABLE_TABLE, null, "nomatch", conf, out, err); - Assert.assertEquals("No matching table: nomatch" + lsep, outStr.toString()); - } - - @Test - public void noSuchPart() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.PART_TABLE, "nosuch", null, conf, out, err); - Assert.assertEquals("No such partition: nosuch" + lsep, outStr.toString()); - } - - @Test - public void noSuchPartValidFormat() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - // Test with something that looks like a valid entry - new HBaseSchemaTool().go(false, HBaseReadWrite.PART_TABLE, "default.nosuch.nosuch", null, conf, - out, err); - Assert.assertEquals("No such partition: default.nosuch.nosuch" + lsep, outStr.toString()); - } - - - @Test - public void noMatchingPart() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.PART_TABLE, null, "nomatch", conf, out, err); - Assert.assertEquals("No matching partition: nomatch" + lsep, outStr.toString()); - } - - @Test - public void noMatchingPartValidFormat() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - new HBaseSchemaTool().go(false, HBaseReadWrite.PART_TABLE, null, "nomatch.a.b", conf, out, err); - Assert.assertEquals("No matching partition: nomatch.a.b" + lsep, outStr.toString()); - } - - @Test - public void noSuchStorageDescriptor() throws Exception { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - // Strangly enough things don't come back quite the same when going through the Base64 - // encode/decode. - new HBaseSchemaTool().go(false, HBaseReadWrite.SD_TABLE, "nosuch", null, conf, out, err); - Assert.assertEquals("No such storage descriptor: nosucg" + lsep, outStr.toString()); - } - - @Test - public void oneMondoTest() throws Exception { - // This is a pain to do in one big test, but we have to control the order so that we have tests - // without dbs, etc. 
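/* Editor's sketch (illustrative, not part of this patch): the HBaseSchemaTool.go() calling
 * convention these tests exercise. When the first argument is true the tool only lists the
 * HBase table names; otherwise it dumps entries from the named metastore table, selected either
 * by an exact key or by a regex, writing results and errors to the supplied streams. "conf"
 * comes from the deleted test fixture; "db0" and the regex are placeholder selectors. */
ByteArrayOutputStream sketchOut = new ByteArrayOutputStream();
PrintStream sketchPs = new PrintStream(sketchOut);
PrintStream sketchErr = new PrintStream(new ByteArrayOutputStream());
new HBaseSchemaTool().go(true, null, null, null, conf, sketchPs, sketchErr);                       // list tables
new HBaseSchemaTool().go(false, HBaseReadWrite.DB_TABLE, "db0", null, conf, sketchPs, sketchErr);  // exact key
new HBaseSchemaTool().go(false, HBaseReadWrite.DB_TABLE, null, "db[0-9]*", conf, sketchPs, sketchErr); // regex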
- HBaseSchemaTool tool = new HBaseSchemaTool(); - - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - // This needs to be up front before we create any tables or partitions - tool.go(false, HBaseReadWrite.SD_TABLE, null, "whatever", conf, out, err); - Assert.assertEquals("No storage descriptors" + lsep, outStr.toString()); - - // This one needs to be up front too - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.SEQUENCES_TABLE, null, "whatever", conf, out, err); - Assert.assertEquals("No sequences" + lsep, outStr.toString()); - - // Create some databases - String[] dbNames = new String[3]; - for (int i = 0; i < dbNames.length; i++) { - dbNames[i] = "db" + i; - Database db = new Database(dbNames[i], "no description", "file:///tmp", emptyParameters); - store.createDatabase(db); - } - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.DB_TABLE, "db0", null, conf, out, err); - Assert.assertEquals("{\"name\":\"db0\",\"description\":\"no description\"," + - "\"locationUri\":\"file:///tmp\",\"parameters\":{}}" + lsep, outStr.toString()); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.DB_TABLE, null, ".*", conf, out, err); - Assert.assertEquals("{\"name\":\"db0\",\"description\":\"no description\"," + - "\"locationUri\":\"file:///tmp\",\"parameters\":{}}" + lsep + - "{\"name\":\"db1\",\"description\":\"no description\"," + - "\"locationUri\":\"file:///tmp\",\"parameters\":{}}" + lsep + - "{\"name\":\"db2\",\"description\":\"no description\"," + - "\"locationUri\":\"file:///tmp\",\"parameters\":{}}" + lsep, - outStr.toString()); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.DB_TABLE, null, "db[12]", conf, out, err); - Assert.assertEquals("{\"name\":\"db1\",\"description\":\"no description\"," + - "\"locationUri\":\"file:///tmp\",\"parameters\":{}}" + lsep + - "{\"name\":\"db2\",\"description\":\"no description\"," + - "\"locationUri\":\"file:///tmp\",\"parameters\":{}}" + lsep, - outStr.toString()); - - String[] roleNames = new String[2]; - for (int i = 0; i < roleNames.length; i++) { - roleNames[i] = "role" + i; - store.addRole(roleNames[i], "me"); - } - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.ROLE_TABLE, null, "role.", conf, out, err); - Assert.assertEquals("{\"roleName\":\"role0\",\"createTime\":now,\"ownerName\":\"me\"}" + - lsep + "{\"roleName\":\"role1\",\"createTime\":now,\"ownerName\":\"me\"}" + lsep, - outStr.toString().replaceAll("createTime\":[0-9]+", "createTime\":now")); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.ROLE_TABLE, "role1", null, conf, out, err); - Assert.assertEquals("{\"roleName\":\"role1\",\"createTime\":now,\"ownerName\":\"me\"}" + lsep, - outStr.toString().replaceAll("createTime\":[0-9]+", "createTime\":now")); - - Role role1 = store.getRole("role1"); - store.grantRole(role1, "fred", PrincipalType.USER, "me", PrincipalType.USER, false); - store.grantRole(role1, "joanne", PrincipalType.USER, "me", PrincipalType.USER, false); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.USER_TO_ROLE_TABLE, null, 
".*", conf, out, err); - Assert.assertEquals("fred: role1" + lsep + "joanne: role1" + lsep, outStr.toString()); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.USER_TO_ROLE_TABLE, "joanne", null, conf, out, err); - Assert.assertEquals("role1" + lsep, outStr.toString()); - - String[] funcNames = new String[3]; - for (int i = 0; i < funcNames.length; i++) { - funcNames[i] = "func" + i; - Function function = new Function(funcNames[i], "db1", "Function", "me", PrincipalType.USER, 0, - FunctionType.JAVA, null); - store.createFunction(function); - } - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.FUNC_TABLE, "db1.func0", null, conf, out, err); - Assert.assertEquals("{\"functionName\":\"func0\",\"dbName\":\"db1\"," + - "\"className\":\"Function\",\"ownerName\":\"me\",\"ownerType\":1,\"createTime\":0," + - "\"functionType\":1}" + lsep, outStr.toString()); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.FUNC_TABLE, null, ".*", conf, out, err); - Assert.assertEquals("{\"functionName\":\"func0\",\"dbName\":\"db1\"," + - "\"className\":\"Function\",\"ownerName\":\"me\",\"ownerType\":1,\"createTime\":0," + - "\"functionType\":1}" + lsep + - "{\"functionName\":\"func1\",\"dbName\":\"db1\"," + - "\"className\":\"Function\",\"ownerName\":\"me\",\"ownerType\":1,\"createTime\":0," + - "\"functionType\":1}" + lsep + - "{\"functionName\":\"func2\",\"dbName\":\"db1\"," + - "\"className\":\"Function\",\"ownerName\":\"me\",\"ownerType\":1,\"createTime\":0," + - "\"functionType\":1}" + lsep, outStr.toString()); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.FUNC_TABLE, null, "db1.func[12]", conf, out, err); - Assert.assertEquals("{\"functionName\":\"func1\",\"dbName\":\"db1\"," + - "\"className\":\"Function\",\"ownerName\":\"me\",\"ownerType\":1,\"createTime\":0," + - "\"functionType\":1}" + lsep + - "{\"functionName\":\"func2\",\"dbName\":\"db1\"," + - "\"className\":\"Function\",\"ownerName\":\"me\",\"ownerType\":1,\"createTime\":0," + - "\"functionType\":1}" + lsep, outStr.toString()); - - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.GLOBAL_PRIVS_TABLE, null, null, conf, out, err); - Assert.assertEquals("No global privileges" + lsep, outStr.toString()); - - List privileges = new ArrayList<>(); - HiveObjectRef hiveObjRef = new HiveObjectRef(HiveObjectType.GLOBAL, "db0", "tab0", null, - null); - PrivilegeGrantInfo grantInfo = - new PrivilegeGrantInfo("read", 0, "me", PrincipalType.USER, false); - HiveObjectPrivilege hop = new HiveObjectPrivilege(hiveObjRef, "user", PrincipalType.USER, - grantInfo); - privileges.add(hop); - - grantInfo = new PrivilegeGrantInfo("create", 0, "me", PrincipalType.USER, true); - hop = new HiveObjectPrivilege(hiveObjRef, "user", PrincipalType.USER, grantInfo); - privileges.add(hop); - - PrivilegeBag pBag = new PrivilegeBag(privileges); - store.grantPrivileges(pBag); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.GLOBAL_PRIVS_TABLE, null, null, conf, out, err); - Assert.assertEquals( - "{\"userPrivileges\":{\"user\":[{\"privilege\":\"read\",\"createTime\":0," + - "\"grantor\":\"me\",\"grantorType\":1,\"grantOption\":0},{\"privilege\":\"create\"," + - "\"createTime\":0,\"grantor\":\"me\",\"grantorType\":1,\"grantOption\":1}]}}" + lsep, - 
outStr.toString()); - - - String[] tableNames = new String[3]; - for (int i = 0; i < tableNames.length; i++) { - tableNames[i] = "tab" + i; - StorageDescriptor sd = new StorageDescriptor(Arrays.asList(new FieldSchema("col1", "int", - ""), new FieldSchema("col2", "varchar(32)", "")), - "/tmp", null, null, false, 0, null, null, null, Collections.emptyMap()); - Table tab = new Table(tableNames[i], dbNames[0], "me", 0, 0, 0, sd, - Arrays.asList(new FieldSchema("pcol1", "string", ""), - new FieldSchema("pcol2", "string", "")), - Collections.emptyMap(), null, null, null); - store.createTable(tab); - } - - ColumnStatisticsDesc tableStatsDesc = new ColumnStatisticsDesc(false, "db0", "tab0"); - ColumnStatisticsData tcsd = new ColumnStatisticsData(); - LongColumnStatsData tlcsd = new LongColumnStatsData(1, 2); - tlcsd.setLowValue(-95); - tlcsd.setHighValue(95); - tcsd.setLongStats(tlcsd); - ColumnStatisticsData tcsd2 = new ColumnStatisticsData(); - tcsd2.setStringStats(new StringColumnStatsData(97, 18.78, 29, 397)); - List tcsos = Arrays.asList( - new ColumnStatisticsObj("col1", "int", tcsd), - new ColumnStatisticsObj("col2", "varchar(32)", tcsd2)); - ColumnStatistics tStatObj = new ColumnStatistics(tableStatsDesc, tcsos); - store.updateTableColumnStatistics(tStatObj); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.TABLE_TABLE, "db0.tab1", null, conf, out, err); - Assert.assertEquals("{\"tableName\":\"tab1\",\"dbName\":\"db0\",\"owner\":\"me\"," + - "\"createTime\":0,\"lastAccessTime\":0,\"retention\":0," + - "\"partitionKeys\":[{\"name\":\"pcol1\",\"type\":\"string\",\"comment\":\"\"}," + - "{\"name\":\"pcol2\",\"type\":\"string\",\"comment\":\"\"}],\"parameters\":{}," + - "\"tableType\":\"\",\"rewriteEnabled\":0} sdHash: qQTgZAi5VzgpozzFGmIVTQ stats:" + lsep, - outStr.toString()); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.TABLE_TABLE, null, "db0.*", conf, out, err); - Assert.assertEquals("{\"tableName\":\"tab0\",\"dbName\":\"db0\",\"owner\":\"me\"," + - "\"createTime\":0,\"lastAccessTime\":0,\"retention\":0," + - "\"partitionKeys\":[{\"name\":\"pcol1\",\"type\":\"string\",\"comment\":\"\"}," + - "{\"name\":\"pcol2\",\"type\":\"string\",\"comment\":\"\"}],\"parameters\":{\"COLUMN_STATS_ACCURATE\":\"{\\\"COLUMN_STATS\\\":{\\\"col1\\\":\\\"true\\\",\\\"col2\\\":\\\"true\\\"}}\"}," + - "\"tableType\":\"\",\"rewriteEnabled\":0} sdHash: qQTgZAi5VzgpozzFGmIVTQ stats: column " + - "col1: {\"colName\":\"col1\",\"colType\":\"int\"," + - "\"statsData\":{\"longStats\":{\"lowValue\":-95,\"highValue\":95,\"numNulls\":1," + - "\"numDVs\":2,\"bitVectors\":\"\"}}} column col2: {\"colName\":\"col2\",\"colType\":\"varchar(32)\"," + - "\"statsData\":{\"stringStats\":{\"maxColLen\":97,\"avgColLen\":18.78," + - "\"numNulls\":29,\"numDVs\":397,\"bitVectors\":\"\"}}}" + lsep + - "{\"tableName\":\"tab1\",\"dbName\":\"db0\",\"owner\":\"me\",\"createTime\":0," + - "\"lastAccessTime\":0,\"retention\":0,\"partitionKeys\":[{\"name\":\"pcol1\"," + - "\"type\":\"string\",\"comment\":\"\"},{\"name\":\"pcol2\",\"type\":\"string\"," + - "\"comment\":\"\"}],\"parameters\":{},\"tableType\":\"\",\"rewriteEnabled\":0} sdHash: " + - "qQTgZAi5VzgpozzFGmIVTQ stats:" + lsep + - "{\"tableName\":\"tab2\",\"dbName\":\"db0\",\"owner\":\"me\",\"createTime\":0," + - "\"lastAccessTime\":0,\"retention\":0,\"partitionKeys\":[{\"name\":\"pcol1\"," + - 
"\"type\":\"string\",\"comment\":\"\"},{\"name\":\"pcol2\",\"type\":\"string\"," + - "\"comment\":\"\"}],\"parameters\":{},\"tableType\":\"\",\"rewriteEnabled\":0} sdHash: " + - "qQTgZAi5VzgpozzFGmIVTQ stats:" + lsep, outStr.toString()); - - List> partVals = Arrays.asList(Arrays.asList("a", "b"), Arrays.asList("c", "d")); - for (List pv : partVals) { - StorageDescriptor sd = new StorageDescriptor(Arrays.asList(new FieldSchema("col1", "int", - ""), new FieldSchema("col2", "varchar(32)", "")), - "/tmp", null, null, false, 0, null, null, null, Collections.emptyMap()); - Partition p = new Partition(pv, "db0", "tab1", 0, 0, sd, Collections.emptyMap()); - store.addPartition(p); - } - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.PART_TABLE, "db0.tab1.a.b", null, conf, out, err); - Assert.assertEquals("{\"values\":[\"a\",\"b\"],\"dbName\":\"db0\",\"tableName\":\"tab1\"," + - "\"createTime\":0,\"lastAccessTime\":0,\"parameters\":{}} sdHash: " + - "qQTgZAi5VzgpozzFGmIVTQ stats:" + lsep, outStr.toString()); - - ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(false, "db0", "tab1"); - statsDesc.setPartName("pcol1=c/pcol2=d"); - ColumnStatisticsData csd1 = new ColumnStatisticsData(); - LongColumnStatsData lcsd = new LongColumnStatsData(1, 2); - lcsd.setLowValue(-95); - lcsd.setHighValue(95); - csd1.setLongStats(lcsd); - ColumnStatisticsData csd2 = new ColumnStatisticsData(); - csd2.setStringStats(new StringColumnStatsData(97, 18.78, 29, 397)); - List csos = Arrays.asList( - new ColumnStatisticsObj("col1", "int", csd1), - new ColumnStatisticsObj("col2", "varchar(32)", csd2)); - ColumnStatistics statsObj = new ColumnStatistics(statsDesc, csos); - store.updatePartitionColumnStatistics(statsObj, partVals.get(1)); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.PART_TABLE, "db0.tab1.c.d", null, conf, out, err); - Assert.assertEquals("{\"values\":[\"c\",\"d\"],\"dbName\":\"db0\",\"tableName\":\"tab1\"," + - "\"createTime\":0,\"lastAccessTime\":0,\"parameters\":{\"COLUMN_STATS_ACCURATE\":\"{\\\"COLUMN_STATS\\\":{\\\"col1\\\":\\\"true\\\",\\\"col2\\\":\\\"true\\\"}}\"}} sdHash: qQTgZAi5VzgpozzFGmIVTQ " + - "stats: column col1: {\"colName\":\"col1\",\"colType\":\"int\"," + - "\"statsData\":{\"longStats\":{\"lowValue\":-95,\"highValue\":95,\"numNulls\":1," + - "\"numDVs\":2,\"bitVectors\":\"\"}}} column col2: {\"colName\":\"col2\",\"colType\":\"varchar(32)\"," + - "\"statsData\":{\"stringStats\":{\"maxColLen\":97,\"avgColLen\":18.78,\"numNulls\":29," + - "\"numDVs\":397,\"bitVectors\":\"\"}}}" + lsep, outStr.toString()); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.PART_TABLE, null, "db0.tab1", conf, out, err); - Assert.assertEquals("{\"values\":[\"a\",\"b\"],\"dbName\":\"db0\",\"tableName\":\"tab1\"," + - "\"createTime\":0,\"lastAccessTime\":0,\"parameters\":{}} sdHash: qQTgZAi5VzgpozzFGmIVTQ " + - "stats:" + lsep + - "{\"values\":[\"c\",\"d\"],\"dbName\":\"db0\",\"tableName\":\"tab1\",\"createTime\":0," + - "\"lastAccessTime\":0,\"parameters\":{\"COLUMN_STATS_ACCURATE\":\"{\\\"COLUMN_STATS\\\":{\\\"col1\\\":\\\"true\\\",\\\"col2\\\":\\\"true\\\"}}\"}} sdHash: qQTgZAi5VzgpozzFGmIVTQ stats: column " + - "col1: {\"colName\":\"col1\",\"colType\":\"int\"," + - "\"statsData\":{\"longStats\":{\"lowValue\":-95,\"highValue\":95,\"numNulls\":1," + - "\"numDVs\":2,\"bitVectors\":\"\"}}} column col2: 
{\"colName\":\"col2\",\"colType\":\"varchar(32)\"," + - "\"statsData\":{\"stringStats\":{\"maxColLen\":97,\"avgColLen\":18.78,\"numNulls\":29," + - "\"numDVs\":397,\"bitVectors\":\"\"}}}" + lsep, outStr.toString()); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.PART_TABLE, null, "db0.tab1.a", conf, out, err); - Assert.assertEquals("{\"values\":[\"a\",\"b\"],\"dbName\":\"db0\",\"tableName\":\"tab1\"," + - "\"createTime\":0,\"lastAccessTime\":0,\"parameters\":{}} sdHash: qQTgZAi5VzgpozzFGmIVTQ " + - "stats:" + lsep, outStr.toString()); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.SD_TABLE, "qQTgZAi5VzgpozzFGmIVTQ", null, conf, out, err); - Assert.assertEquals("{\"cols\":[{\"name\":\"col1\",\"type\":\"int\",\"comment\":\"\"}," + - "{\"name\":\"col2\",\"type\":\"varchar(32)\",\"comment\":\"\"}],\"compressed\":0," + - "\"numBuckets\":0,\"bucketCols\":[],\"sortCols\":[],\"storedAsSubDirectories\":0}" + lsep, - outStr.toString()); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.SD_TABLE, null, "whatever", conf, out, err); - Assert.assertEquals("qQTgZAi5VzgpozzFGmIVTQ: {\"cols\":[{\"name\":\"col1\",\"type\":\"int\"," + - "\"comment\":\"\"}," + - "{\"name\":\"col2\",\"type\":\"varchar(32)\",\"comment\":\"\"}],\"compressed\":0," + - "\"numBuckets\":0,\"bucketCols\":[],\"sortCols\":[],\"storedAsSubDirectories\":0}" + lsep, - outStr.toString()); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.SECURITY_TABLE, null, "whatever", conf, out, err); - Assert.assertEquals("No security related entries" + lsep, outStr.toString()); - - store.addMasterKey("this be a key"); - store.addToken("tokenid", "delegation token"); - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.SECURITY_TABLE, null, "whatever", conf, out, err); - Assert.assertEquals("Master key 0: this be a key" + lsep + - "Delegation token tokenid: delegation token" + lsep, outStr.toString()); - - outStr = new ByteArrayOutputStream(); - out = new PrintStream(outStr); - tool.go(false, HBaseReadWrite.SEQUENCES_TABLE, null, "whatever", conf, out, err); - Assert.assertEquals("master_key: 1" + lsep, outStr.toString()); - } -} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool2.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool2.java deleted file mode 100644 index 0c95b2f..0000000 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool2.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.commons.lang.StringUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.ByteArrayOutputStream; -import java.io.PrintStream; - -/** - * This is in a separate class because install tests shouldn't set up the metastore first. - */ -public class TestHBaseSchemaTool2 extends HBaseIntegrationTests { - - private String lsep = System.getProperty("line.separator"); - - @BeforeClass - public static void startup() throws Exception { - HBaseIntegrationTests.startMiniCluster(); - } - - @AfterClass - public static void shutdown() throws Exception { - HBaseIntegrationTests.shutdownMiniCluster(); - } - - @Test - public void install() { - ByteArrayOutputStream outStr = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(outStr); - ByteArrayOutputStream errStr = new ByteArrayOutputStream(); - PrintStream err = new PrintStream(errStr); - - HBaseSchemaTool tool = new HBaseSchemaTool(); - tool.install(conf, err); - tool.go(true, null, null, null, conf, out, err); - Assert.assertEquals(StringUtils.join(HBaseReadWrite.tableNames, lsep) + lsep, - outStr.toString()); - } -} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java deleted file mode 100644 index 2cc1373..0000000 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java +++ /dev/null @@ -1,1801 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.metastore.HiveMetaStore; -import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.Decimal; -import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.FunctionType; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; -import org.apache.hadoop.hive.metastore.api.ResourceType; -import org.apache.hadoop.hive.metastore.api.ResourceUri; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Table; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Set; -import java.util.TreeSet; - -/** - * Integration tests with HBase Mini-cluster for HBaseStore - */ -public class TestHBaseStoreIntegration extends HBaseIntegrationTests { - - private static final Logger LOG = LoggerFactory.getLogger(TestHBaseStoreIntegration.class.getName()); - - @Rule public ExpectedException thrown = ExpectedException.none(); - - @BeforeClass - public static void startup() throws Exception { - HBaseIntegrationTests.startMiniCluster(); - } - - @AfterClass - public static void shutdown() throws Exception { - HBaseIntegrationTests.shutdownMiniCluster(); - } - - @Before - public void setup() throws IOException { - setupConnection(); - setupHBaseStore(); - } - - @Test - public void createDb() throws Exception { - String dbname = "mydb"; - Database db = new Database(dbname, "no description", "file:///tmp", emptyParameters); - store.createDatabase(db); - - Database d = store.getDatabase("mydb"); - Assert.assertEquals(dbname, d.getName()); - Assert.assertEquals("no 
description", d.getDescription()); - Assert.assertEquals("file:///tmp", d.getLocationUri()); - } - - @Test - public void dropDb() throws Exception { - String dbname = "anotherdb"; - Database db = new Database(dbname, "no description", "file:///tmp", emptyParameters); - store.createDatabase(db); - - Database d = store.getDatabase(dbname); - Assert.assertNotNull(d); - - store.dropDatabase(dbname); - thrown.expect(NoSuchObjectException.class); - store.getDatabase(dbname); - } - - @Test - public void getAllDbs() throws Exception { - String[] dbNames = new String[3]; - for (int i = 0; i < dbNames.length; i++) { - dbNames[i] = "db" + i; - Database db = new Database(dbNames[i], "no description", "file:///tmp", emptyParameters); - store.createDatabase(db); - } - - List dbs = store.getAllDatabases(); - Assert.assertEquals(3, dbs.size()); - String[] namesFromStore = dbs.toArray(new String[3]); - Arrays.sort(namesFromStore); - Assert.assertArrayEquals(dbNames, namesFromStore); - } - - @Test - public void getDbsRegex() throws Exception { - String[] dbNames = new String[3]; - for (int i = 0; i < dbNames.length; i++) { - dbNames[i] = "db" + i; - Database db = new Database(dbNames[i], "no description", "file:///tmp", emptyParameters); - store.createDatabase(db); - } - - List dbs = store.getDatabases("db1|db2"); - Assert.assertEquals(2, dbs.size()); - String[] namesFromStore = dbs.toArray(new String[2]); - Arrays.sort(namesFromStore); - Assert.assertArrayEquals(Arrays.copyOfRange(dbNames, 1, 3), namesFromStore); - - dbs = store.getDatabases("db*"); - Assert.assertEquals(3, dbs.size()); - namesFromStore = dbs.toArray(new String[3]); - Arrays.sort(namesFromStore); - Assert.assertArrayEquals(dbNames, namesFromStore); - } - - @Test - public void getFuncsRegex() throws Exception { - String dbname = "default"; - int now = (int)(System.currentTimeMillis()/1000); - String[] funcNames = new String[3]; - for (int i = 0; i < funcNames.length; i++) { - funcNames[i] = "func" + i; - store.createFunction(new Function(funcNames[i], dbname, "o.a.h.h.myfunc", "me", - PrincipalType.USER, now, FunctionType.JAVA, - Arrays.asList(new ResourceUri(ResourceType.JAR, - "file:/tmp/somewhere")))); - } - - List funcs = store.getFunctions(dbname, "func1|func2"); - Assert.assertEquals(2, funcs.size()); - String[] namesFromStore = funcs.toArray(new String[2]); - Arrays.sort(namesFromStore); - Assert.assertArrayEquals(Arrays.copyOfRange(funcNames, 1, 3), namesFromStore); - - funcs = store.getFunctions(dbname, "func*"); - Assert.assertEquals(3, funcs.size()); - namesFromStore = funcs.toArray(new String[3]); - Arrays.sort(namesFromStore); - Assert.assertArrayEquals(funcNames, namesFromStore); - - funcs = store.getFunctions("nosuchdb", "func*"); - Assert.assertEquals(0, funcs.size()); - } - - @Test - public void createTable() throws Exception { - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - Table table = new Table("mytable", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - Table t = store.getTable("default", "mytable"); - Assert.assertEquals(1, t.getSd().getColsSize()); - Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", 
t.getSd().getCols().get(0).getType()); - Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); - Assert.assertEquals("file:/tmp", t.getSd().getLocation()); - Assert.assertEquals("input", t.getSd().getInputFormat()); - Assert.assertEquals("output", t.getSd().getOutputFormat()); - Assert.assertEquals("me", t.getOwner()); - Assert.assertEquals("default", t.getDbName()); - Assert.assertEquals("mytable", t.getTableName()); - } - - @Test - public void alterTable() throws Exception { - String tableName = "alttable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - startTime += 10; - table.setLastAccessTime(startTime); - LOG.debug("XXX alter table test"); - store.alterTable("default", tableName, table); - - Table t = store.getTable("default", tableName); - LOG.debug("Alter table time " + t.getLastAccessTime()); - Assert.assertEquals(1, t.getSd().getColsSize()); - Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); - Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); - Assert.assertEquals("file:/tmp", t.getSd().getLocation()); - Assert.assertEquals("input", t.getSd().getInputFormat()); - Assert.assertEquals("output", t.getSd().getOutputFormat()); - Assert.assertEquals("me", t.getOwner()); - Assert.assertEquals("default", t.getDbName()); - Assert.assertEquals(tableName, t.getTableName()); - Assert.assertEquals(startTime, t.getLastAccessTime()); - } - - @Test - public void getAllTables() throws Exception { - String dbNames[] = new String[]{"db0", "db1"}; // named to match getAllDbs so we get the - // right number of databases in that test. 
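Every test in this deleted file builds tables through the same FieldSchema / SerDeInfo / StorageDescriptor / Table boilerplate, and the flattened diff text has also dropped the List<...> type parameters, so the calls are hard to read. The following is a minimal, self-contained sketch of that pattern only; the class name TableBuilderSketch and the literal values are illustrative and are not part of the original patch.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

// Sketch of the table-construction pattern repeated throughout these tests.
public class TableBuilderSketch {
  static Table buildExampleTable(String dbName, String tableName) {
    int now = (int) (System.currentTimeMillis() / 1000);
    List<FieldSchema> cols = new ArrayList<>();
    cols.add(new FieldSchema("col1", "int", "nocomment"));
    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
    // cols, location, inputFormat, outputFormat, compressed, numBuckets,
    // serdeInfo, bucketCols, sortCols, parameters
    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output",
        false, 0, serde, null, null, Collections.<String, String>emptyMap());
    // tableName, dbName, owner, createTime, lastAccessTime, retention, sd,
    // partitionKeys, parameters, viewOriginalText, viewExpandedText, tableType
    return new Table(tableName, dbName, "me", now, now, 0, sd, null,
        Collections.<String, String>emptyMap(), null, null, null);
  }
}

The partitioned-table variants in these tests differ only in passing a non-null partitionKeys list to the Table constructor and cloning the StorageDescriptor (new StorageDescriptor(sd)) for each Partition.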
- String tableNames[] = new String[]{"curly", "larry", "moe"}; - - for (int i = 0; i < dbNames.length; i++) { - store.createDatabase(new Database(dbNames[i], "no description", "file:///tmp", - emptyParameters)); - } - - for (int i = 0; i < dbNames.length; i++) { - for (int j = 0; j < tableNames.length; j++) { - int startTime = (int) (System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, - 0, - serde, null, null, emptyParameters); - Table table = new Table(tableNames[j], dbNames[i], "me", startTime, startTime, 0, sd, - null, - emptyParameters, null, null, null); - store.createTable(table); - } - } - - List fetchedNames = store.getAllTables(dbNames[0]); - Assert.assertEquals(3, fetchedNames.size()); - String[] sortedFetchedNames = fetchedNames.toArray(new String[fetchedNames.size()]); - Arrays.sort(sortedFetchedNames); - Assert.assertArrayEquals(tableNames, sortedFetchedNames); - - List regexNames = store.getTables(dbNames[0], "*y"); - Assert.assertEquals(2, regexNames.size()); - String[] sortedRegexNames = regexNames.toArray(new String[regexNames.size()]); - Arrays.sort(sortedRegexNames); - Assert.assertArrayEquals(Arrays.copyOfRange(tableNames, 0, 2), sortedRegexNames); - - List fetchedTables = store.getTableObjectsByName(dbNames[1], - Arrays.asList(Arrays.copyOfRange(tableNames, 1, 3))); - Assert.assertEquals(2, fetchedTables.size()); - sortedFetchedNames = new String[fetchedTables.size()]; - for (int i = 0; i < fetchedTables.size(); i++) { - sortedFetchedNames[i] = fetchedTables.get(i).getTableName(); - } - Arrays.sort(sortedFetchedNames); - Assert.assertArrayEquals(Arrays.copyOfRange(tableNames, 1, 3), sortedFetchedNames); - } - - @Test - public void dropTable() throws Exception { - String tableName = "dtable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - Table t = store.getTable("default", tableName); - Assert.assertNotNull(t); - - store.dropTable("default", tableName); - Assert.assertNull(store.getTable("default", tableName)); - } - - @Test - public void createPartition() throws Exception { - String dbName = "default"; - String tableName = "myparttable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List vals = new ArrayList(); - vals.add("fred"); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=fred"); - Partition part = new Partition(vals, 
dbName, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - - Partition p = store.getPartition(dbName, tableName, vals); - Assert.assertEquals(1, p.getSd().getColsSize()); - Assert.assertEquals("col1", p.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", p.getSd().getCols().get(0).getType()); - Assert.assertEquals("nocomment", p.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", p.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", p.getSd().getSerdeInfo().getSerializationLib()); - Assert.assertEquals("file:/tmp/pc=fred", p.getSd().getLocation()); - Assert.assertEquals("input", p.getSd().getInputFormat()); - Assert.assertEquals("output", p.getSd().getOutputFormat()); - Assert.assertEquals(dbName, p.getDbName()); - Assert.assertEquals(tableName, p.getTableName()); - Assert.assertEquals(1, p.getValuesSize()); - Assert.assertEquals("fred", p.getValues().get(0)); - } - - @Test - public void addPartitions() throws Exception { - String dbName = "default"; - String tableName = "addParts"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); - List partitions = new ArrayList(); - for (String val : partVals) { - List vals = new ArrayList(); - vals.add(val); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=" + val); - Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, - emptyParameters); - partitions.add(part); - } - store.addPartitions(dbName, tableName, partitions); - - List partNames = store.listPartitionNames(dbName, tableName, (short) -1); - Assert.assertEquals(5, partNames.size()); - String[] names = partNames.toArray(new String[partNames.size()]); - Arrays.sort(names); - String[] canonicalNames = partVals.toArray(new String[partVals.size()]); - for (int i = 0; i < canonicalNames.length; i++) canonicalNames[i] = "pc=" + canonicalNames[i]; - Assert.assertArrayEquals(canonicalNames, names); - } - - @Test - public void alterPartitions() throws Exception { - String dbName = "default"; - String tableName = "alterParts"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); - List partitions = new ArrayList(); - List> allVals = new ArrayList>(); - for (String val : partVals) { - List vals = new ArrayList(); - allVals.add(vals); - vals.add(val); - 
StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=" + val); - Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, - emptyParameters); - partitions.add(part); - } - store.addPartitions(dbName, tableName, partitions); - - for (Partition p : partitions) p.setLastAccessTime(startTime + 10); - store.alterPartitions(dbName, tableName, allVals, partitions); - - partitions = store.getPartitions(dbName, tableName, -1); - for (Partition part : partitions) { - Assert.assertEquals(startTime + 10, part.getLastAccessTime()); - } - } - - @Test - public void getPartitions() throws Exception { - String dbName = "default"; - String tableName = "manyParts"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); - for (String val : partVals) { - List vals = new ArrayList(); - vals.add(val); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=" + val); - Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - - Partition p = store.getPartition(dbName, tableName, vals); - Assert.assertEquals("file:/tmp/pc=" + val, p.getSd().getLocation()); - } - - List parts = store.getPartitions(dbName, tableName, -1); - Assert.assertEquals(5, parts.size()); - String[] pv = new String[5]; - for (int i = 0; i < 5; i++) pv[i] = parts.get(i).getValues().get(0); - Arrays.sort(pv); - Assert.assertArrayEquals(pv, partVals.toArray(new String[5])); - } - - @Test - public void listPartitions() throws Exception { - String dbName = "default"; - String tableName = "listParts"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - partCols.add(new FieldSchema("region", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; - for (String[] pv : partVals) { - List vals = new ArrayList(); - for (String v : pv) vals.add(v); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=" + pv[0] + "/region=" + pv[1]); - Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - } - - List names = store.listPartitionNames(dbName, tableName, (short) -1); - Assert.assertEquals(2, names.size()); - String[] resultNames = names.toArray(new String[names.size()]); - Arrays.sort(resultNames); - 
Assert.assertArrayEquals(resultNames, new String[]{"pc=today/region=north america", - "pc=tomorrow/region=europe"}); - - List parts = store.getPartitionsByNames(dbName, tableName, names); - Assert.assertArrayEquals(partVals[0], parts.get(0).getValues().toArray(new String[2])); - Assert.assertArrayEquals(partVals[1], parts.get(1).getValues().toArray(new String[2])); - - store.dropPartitions(dbName, tableName, names); - List afterDropParts = store.getPartitions(dbName, tableName, -1); - Assert.assertEquals(0, afterDropParts.size()); - } - - @Test - public void listPartitionsWithPs() throws Exception { - String dbName = "default"; - String tableName = "listPartitionsWithPs"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("ds", "string", "")); - partCols.add(new FieldSchema("region", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - String[][] partVals = new String[][]{{"today", "north america"}, {"today", "europe"}, - {"tomorrow", "north america"}, {"tomorrow", "europe"}}; - for (String[] pv : partVals) { - List vals = new ArrayList(); - for (String v : pv) vals.add(v); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/ds=" + pv[0] + "/region=" + pv[1]); - Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - } - - // We only test listPartitionNamesPs since it calls listPartitionsPsWithAuth anyway. 
- // Test the case where we completely specify the partition - List partitionNames = - store.listPartitionNamesPs(dbName, tableName, Arrays.asList(partVals[0]), (short) -1); - Assert.assertEquals(1, partitionNames.size()); - Assert.assertEquals("ds=today/region=north america", partitionNames.get(0)); - - // Leave off the last value of the partition - partitionNames = - store.listPartitionNamesPs(dbName, tableName, Arrays.asList(partVals[0][0]), (short)-1); - Assert.assertEquals(2, partitionNames.size()); - String[] names = partitionNames.toArray(new String[partitionNames.size()]); - Arrays.sort(names); - Assert.assertArrayEquals(new String[] {"ds=today/region=europe", - "ds=today/region=north america"}, names); - - // Put a star in the last value of the partition - partitionNames = - store.listPartitionNamesPs(dbName, tableName, Arrays.asList("today", "*"), (short)-1); - Assert.assertEquals(2, partitionNames.size()); - names = partitionNames.toArray(new String[partitionNames.size()]); - Arrays.sort(names); - Assert.assertArrayEquals(new String[] {"ds=today/region=europe", - "ds=today/region=north america"}, names); - - // Put a star in the first value of the partition - partitionNames = - store.listPartitionNamesPs(dbName, tableName, Arrays.asList("*", "europe"), (short)-1); - Assert.assertEquals(2, partitionNames.size()); - names = partitionNames.toArray(new String[partitionNames.size()]); - Arrays.sort(names); - Assert.assertArrayEquals(new String[] {"ds=today/region=europe", - "ds=tomorrow/region=europe"}, names); - } - - - @Test - public void getPartitionsByFilter() throws Exception { - String dbName = "default"; - String tableName = "getPartitionsByFilter"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("ds", "string", "")); - partCols.add(new FieldSchema("region", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - String[][] partVals = new String[][]{{"20010101", "north america"}, {"20010101", "europe"}, - {"20010102", "north america"}, {"20010102", "europe"}, {"20010103", "north america"}}; - for (String[] pv : partVals) { - List vals = new ArrayList(); - for (String v : pv) vals.add(v); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/ds=" + pv[0] + "/region=" + pv[1]); - Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - } - - // We only test getPartitionsByFilter since it calls same code as getPartitionsByExpr anyway. 
- // Test the case where we completely specify the partition - List parts = null; - parts = store.getPartitionsByFilter(dbName, tableName, "ds > '20010101'", (short) -1); - checkPartVals(parts, "[20010102, north america]", "[20010102, europe]", - "[20010103, north america]"); - - parts = store.getPartitionsByFilter(dbName, tableName, "ds >= '20010102'", (short) -1); - checkPartVals(parts, "[20010102, north america]", "[20010102, europe]", - "[20010103, north america]"); - - parts = store.getPartitionsByFilter(dbName, tableName, - "ds >= '20010102' and region = 'europe' ", (short) -1); - // filtering on first partition is only implemented as of now, so it will - // not filter on region - checkPartVals(parts, "[20010102, north america]", "[20010102, europe]", - "[20010103, north america]"); - - parts = store.getPartitionsByFilter(dbName, tableName, - "ds >= '20010101' and ds < '20010102'", (short) -1); - checkPartVals(parts,"[20010101, north america]", "[20010101, europe]"); - - parts = store.getPartitionsByFilter(dbName, tableName, - "ds = '20010102' or ds < '20010103'", (short) -1); - checkPartVals(parts, "[20010101, north america]", "[20010101, europe]", - "[20010102, north america]", "[20010102, europe]"); - - // test conversion to DNF - parts = store.getPartitionsByFilter(dbName, tableName, - "ds = '20010102' and (ds = '20010102' or region = 'europe')", (short) -1); - // filtering on first partition is only implemented as of now, so it will not filter on region - checkPartVals(parts, "[20010102, north america]", "[20010102, europe]"); - - parts = store.getPartitionsByFilter(dbName, tableName, - "region = 'europe'", (short) -1); - // filtering on first partition is only implemented as of now, so it will not filter on region - checkPartVals(parts, "[20010101, north america]", "[20010101, europe]", - "[20010102, north america]", "[20010102, europe]", "[20010103, north america]"); - - } - - /** - * Check if the given partitions have same values as given partitions value strings - * @param parts given partitions - * @param expectedPartVals - */ - private void checkPartVals(List parts, String ... 
expectedPartVals) { - Assert.assertEquals("number of partitions", expectedPartVals.length, parts.size()); - Set partValStrings = new TreeSet(); - for(Partition part : parts) { - partValStrings.add(part.getValues().toString()); - } - partValStrings.equals(new TreeSet(Arrays.asList(expectedPartVals))); - } - - @Test - public void dropPartition() throws Exception { - String dbName = "default"; - String tableName = "myparttable2"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List vals = Arrays.asList("fred"); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=fred"); - Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - - Assert.assertNotNull(store.getPartition(dbName, tableName, vals)); - store.dropPartition(dbName, tableName, vals); - thrown.expect(NoSuchObjectException.class); - store.getPartition(dbName, tableName, vals); - } - - @Test - public void createRole() throws Exception { - int now = (int)System.currentTimeMillis()/1000; - String roleName = "myrole"; - store.addRole(roleName, "me"); - - Role r = store.getRole(roleName); - Assert.assertEquals(roleName, r.getRoleName()); - Assert.assertEquals("me", r.getOwnerName()); - Assert.assertTrue(now <= r.getCreateTime()); - } - - @Test - public void dropRole() throws Exception { - String roleName = "anotherrole"; - store.addRole(roleName, "me"); - - Role r = store.getRole(roleName); - Assert.assertEquals(roleName, r.getRoleName()); - - store.removeRole(roleName); - thrown.expect(NoSuchObjectException.class); - store.getRole(roleName); - } - - @Test - public void grantRevokeRoles() throws Exception { - int now = (int)(System.currentTimeMillis()/1000); - String roleName1 = "role1"; - store.addRole(roleName1, "me"); - String roleName2 = "role2"; - store.addRole(roleName2, "me"); - - Role role1 = store.getRole(roleName1); - Role role2 = store.getRole(roleName2); - - store.grantRole(role1, "fred", PrincipalType.USER, "bob", PrincipalType.USER, false); - store.grantRole(role2, roleName1, PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); - store.grantRole(role2, "fred", PrincipalType.USER, "admin", PrincipalType.ROLE, false); - - List roles = store.listRoles("fred", PrincipalType.USER); - Assert.assertEquals(3, roles.size()); - boolean sawRole1 = false, sawRole2 = false, sawPublic = false; - for (Role role : roles) { - if (role.getRoleName().equals(roleName1)) { - sawRole1 = true; - } else if (role.getRoleName().equals(roleName2)) { - sawRole2 = true; - } else if (role.getRoleName().equals(HiveMetaStore.PUBLIC)) { - sawPublic = true; - } else { - Assert.fail("Unknown role name " + role.getRoleName()); - } - } - Assert.assertTrue(sawRole1 && sawRole2 && sawPublic); - - roles = store.listRoles("fred", PrincipalType.ROLE); - Assert.assertEquals(0, roles.size()); - - roles = store.listRoles(roleName1, PrincipalType.ROLE); - Assert.assertEquals(1, roles.size()); - Role role = roles.get(0); - 
Assert.assertEquals(roleName2, role.getRoleName()); - - // Test listing all members in a role - List grants = store.listRoleMembers(roleName1); - Assert.assertEquals(1, grants.size()); - Assert.assertEquals("fred", grants.get(0).getPrincipalName()); - Assert.assertEquals(PrincipalType.USER, grants.get(0).getPrincipalType()); - Assert.assertTrue("Expected grant time of " + now + " got " + grants.get(0).getGrantTime(), - grants.get(0).getGrantTime() >= now); - Assert.assertEquals("bob", grants.get(0).getGrantorName()); - Assert.assertEquals(PrincipalType.USER, grants.get(0).getGrantorPrincipalType()); - Assert.assertFalse(grants.get(0).isGrantOption()); - - grants = store.listRoleMembers(roleName2); - Assert.assertEquals(2, grants.size()); - boolean sawFred = false; - sawRole1 = false; - for (RolePrincipalGrant m : grants) { - if ("fred".equals(m.getPrincipalName())) sawFred = true; - else if (roleName1.equals(m.getPrincipalName())) sawRole1 = true; - else Assert.fail("Unexpected principal " + m.getPrincipalName()); - } - Assert.assertTrue(sawFred && sawRole1); - - // Revoke a role with grant option, make sure it just goes to no grant option - store.revokeRole(role2, roleName1, PrincipalType.ROLE, true); - roles = store.listRoles(roleName1, PrincipalType.ROLE); - Assert.assertEquals(1, roles.size()); - Assert.assertEquals(roleName2, roles.get(0).getRoleName()); - - grants = store.listRoleMembers(roleName1); - Assert.assertFalse(grants.get(0).isGrantOption()); - - // Drop a role, make sure it is properly removed from the map - store.removeRole(roleName1); - roles = store.listRoles("fred", PrincipalType.USER); - Assert.assertEquals(2, roles.size()); - sawRole2 = sawPublic = false; - for (Role m : roles) { - if (m.getRoleName().equals(roleName2)) sawRole2 = true; - else if (m.getRoleName().equals(HiveMetaStore.PUBLIC)) sawPublic = true; - else Assert.fail("Unknown role " + m.getRoleName()); - } - Assert.assertTrue(sawRole2 && sawPublic); - roles = store.listRoles(roleName1, PrincipalType.ROLE); - Assert.assertEquals(0, roles.size()); - - // Revoke a role without grant option, make sure it goes away - store.revokeRole(role2, "fred", PrincipalType.USER, false); - roles = store.listRoles("fred", PrincipalType.USER); - Assert.assertEquals(1, roles.size()); - Assert.assertEquals(HiveMetaStore.PUBLIC, roles.get(0).getRoleName()); - } - - @Test - public void userToRoleMap() throws Exception { - String roleName1 = "utrm1"; - store.addRole(roleName1, "me"); - String roleName2 = "utrm2"; - store.addRole(roleName2, "me"); - String user1 = "wilma"; - String user2 = "betty"; - - Role role1 = store.getRole(roleName1); - Role role2 = store.getRole(roleName2); - - store.grantRole(role1, user1, PrincipalType.USER, "bob", PrincipalType.USER, false); - store.grantRole(role1, roleName2, PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); - - List roles = HBaseReadWrite.getInstance().getUserRoles(user1); - Assert.assertEquals(2, roles.size()); - String[] roleNames = roles.toArray(new String[roles.size()]); - Arrays.sort(roleNames); - Assert.assertArrayEquals(new String[]{roleName1, roleName2}, roleNames); - - store.grantRole(role2, user1, PrincipalType.USER, "admin", PrincipalType.ROLE, false); - store.grantRole(role1, user2, PrincipalType.USER, "bob", PrincipalType.USER, false); - - HBaseReadWrite.setConf(conf); - roles = HBaseReadWrite.getInstance().getUserRoles(user2); - Assert.assertEquals(2, roles.size()); - roleNames = roles.toArray(new String[roles.size()]); - Arrays.sort(roleNames); - 
Assert.assertArrayEquals(new String[]{roleName1, roleName2}, roleNames); - - store.revokeRole(role1, roleName2, PrincipalType.ROLE, false); - - // user1 should still have both roles since she was granted into role1 specifically. user2 - // should only have role2 now since role2 was revoked from role1. - roles = HBaseReadWrite.getInstance().getUserRoles(user1); - Assert.assertEquals(2, roles.size()); - roleNames = roles.toArray(new String[roles.size()]); - Arrays.sort(roleNames); - Assert.assertArrayEquals(new String[]{roleName1, roleName2}, roleNames); - - roles = HBaseReadWrite.getInstance().getUserRoles(user2); - Assert.assertEquals(1, roles.size()); - Assert.assertEquals(roleName1, roles.get(0)); - } - - @Test - public void userToRoleMapOnDrop() throws Exception { - String roleName1 = "utrmod1"; - store.addRole(roleName1, "me"); - String roleName2 = "utrmod2"; - store.addRole(roleName2, "me"); - String user1 = "pebbles"; - String user2 = "bam-bam"; - - Role role1 = store.getRole(roleName1); - Role role2 = store.getRole(roleName2); - - store.grantRole(role1, user1, PrincipalType.USER, "bob", PrincipalType.USER, false); - store.grantRole(role1, roleName2, PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); - store.grantRole(role1, user2, PrincipalType.USER, "bob", PrincipalType.USER, false); - - List roles = HBaseReadWrite.getInstance().getUserRoles(user2); - Assert.assertEquals(2, roles.size()); - String[] roleNames = roles.toArray(new String[roles.size()]); - Arrays.sort(roleNames); - Assert.assertArrayEquals(new String[]{roleName1, roleName2}, roleNames); - - store.removeRole(roleName2); - - HBaseReadWrite.setConf(conf); - roles = HBaseReadWrite.getInstance().getUserRoles(user1); - Assert.assertEquals(1, roles.size()); - Assert.assertEquals(roleName1, roles.get(0)); - - roles = HBaseReadWrite.getInstance().getUserRoles(user2); - Assert.assertEquals(1, roles.size()); - Assert.assertEquals(roleName1, roles.get(0)); - } - - @Test - public void grantRevokeGlobalPrivileges() throws Exception { - doGrantRevoke(HiveObjectType.GLOBAL, null, null, new String[] {"grpg1", "grpg2"}, - new String[] {"bugs", "elmer", "daphy", "wiley"}); - } - - @Test - public void grantRevokeDbPrivileges() throws Exception { - String dbName = "grdbp_db"; - try { - Database db = new Database(dbName, "no description", "file:///tmp", emptyParameters); - store.createDatabase(db); - doGrantRevoke(HiveObjectType.DATABASE, dbName, null, - new String[] {"grdbp_role1", "grdbp_role2"}, - new String[] {"fred", "barney", "wilma", "betty"}); - } finally { - store.dropDatabase(dbName); - } - } - - @Test - public void grantRevokeTablePrivileges() throws Exception { - String dbName = "grtp_db"; - String tableName = "grtp_table"; - try { - Database db = new Database(dbName, "no description", "file:///tmp", emptyParameters); - store.createDatabase(db); - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - doGrantRevoke(HiveObjectType.TABLE, dbName, tableName, - new String[] {"grtp_role1", "grtp_role2"}, - new String[] {"batman", "robin", "superman", "wonderwoman"}); - - } finally { - if 
(store.getTable(dbName, tableName) != null) store.dropTable(dbName, tableName); - store.dropDatabase(dbName); - } - } - - private void doGrantRevoke(HiveObjectType objectType, String dbName, String tableName, - String[] roleNames, String[] userNames) - throws Exception { - store.addRole(roleNames[0], "me"); - store.addRole(roleNames[1], "me"); - int now = (int)(System.currentTimeMillis() / 1000); - - Role role1 = store.getRole(roleNames[0]); - Role role2 = store.getRole(roleNames[1]); - store.grantRole(role1, userNames[0], PrincipalType.USER, "bob", PrincipalType.USER, false); - store.grantRole(role1, roleNames[1], PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); - store.grantRole(role2, userNames[1], PrincipalType.USER, "bob", PrincipalType.USER, false); - - List privileges = new ArrayList(); - HiveObjectRef hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); - PrivilegeGrantInfo grantInfo = - new PrivilegeGrantInfo("read", now, "me", PrincipalType.USER, false); - HiveObjectPrivilege hop = new HiveObjectPrivilege(hiveObjRef, userNames[0], PrincipalType.USER, - grantInfo); - privileges.add(hop); - - hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); - grantInfo = new PrivilegeGrantInfo("write", now, "me", PrincipalType.USER, true); - hop = new HiveObjectPrivilege(hiveObjRef, roleNames[0], PrincipalType.ROLE, grantInfo); - privileges.add(hop); - - hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); - grantInfo = new PrivilegeGrantInfo("exec", now, "me", PrincipalType.USER, false); - hop = new HiveObjectPrivilege(hiveObjRef, roleNames[1], PrincipalType.ROLE, grantInfo); - privileges.add(hop); - - hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); - grantInfo = new PrivilegeGrantInfo("create", now, "me", PrincipalType.USER, true); - hop = new HiveObjectPrivilege(hiveObjRef, userNames[2], PrincipalType.USER, grantInfo); - privileges.add(hop); - - hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); - grantInfo = new PrivilegeGrantInfo("create2", now, "me", PrincipalType.USER, true); - hop = new HiveObjectPrivilege(hiveObjRef, userNames[2], PrincipalType.USER, grantInfo); - privileges.add(hop); - - PrivilegeBag pBag = new PrivilegeBag(privileges); - store.grantPrivileges(pBag); - - PrincipalPrivilegeSet pps = getPPS(objectType, dbName, tableName, userNames[0]); - - Assert.assertEquals(1, pps.getUserPrivilegesSize()); - Assert.assertEquals(1, pps.getUserPrivileges().get(userNames[0]).size()); - grantInfo = pps.getUserPrivileges().get(userNames[0]).get(0); - Assert.assertEquals("read", grantInfo.getPrivilege()); - Assert.assertTrue(now <= grantInfo.getCreateTime()); - Assert.assertEquals("me", grantInfo.getGrantor()); - Assert.assertEquals(PrincipalType.USER, grantInfo.getGrantorType()); - Assert.assertFalse(grantInfo.isGrantOption()); - - Assert.assertEquals(2, pps.getRolePrivilegesSize()); - Assert.assertEquals(1, pps.getRolePrivileges().get(roleNames[0]).size()); - grantInfo = pps.getRolePrivileges().get(roleNames[0]).get(0); - Assert.assertEquals("write", grantInfo.getPrivilege()); - Assert.assertTrue(now <= grantInfo.getCreateTime()); - Assert.assertEquals("me", grantInfo.getGrantor()); - Assert.assertEquals(PrincipalType.USER, grantInfo.getGrantorType()); - Assert.assertTrue(grantInfo.isGrantOption()); - - Assert.assertEquals(1, pps.getRolePrivileges().get(roleNames[1]).size()); - grantInfo = pps.getRolePrivileges().get(roleNames[1]).get(0); - 
Assert.assertEquals("exec", grantInfo.getPrivilege()); - Assert.assertTrue(now <= grantInfo.getCreateTime()); - Assert.assertEquals("me", grantInfo.getGrantor()); - Assert.assertEquals(PrincipalType.USER, grantInfo.getGrantorType()); - Assert.assertFalse(grantInfo.isGrantOption()); - - pps = getPPS(objectType, dbName, tableName, userNames[1]); - - Assert.assertEquals(0, pps.getUserPrivilegesSize()); - - Assert.assertEquals(1, pps.getRolePrivilegesSize()); - Assert.assertEquals(1, pps.getRolePrivileges().get(roleNames[1]).size()); - grantInfo = pps.getRolePrivileges().get(roleNames[1]).get(0); - Assert.assertEquals("exec", grantInfo.getPrivilege()); - Assert.assertTrue(now <= grantInfo.getCreateTime()); - Assert.assertEquals("me", grantInfo.getGrantor()); - Assert.assertEquals(PrincipalType.USER, grantInfo.getGrantorType()); - Assert.assertFalse(grantInfo.isGrantOption()); - - pps = getPPS(objectType, dbName, tableName, userNames[2]); - - Assert.assertEquals(1, pps.getUserPrivilegesSize()); - Assert.assertEquals(2, pps.getUserPrivileges().get(userNames[2]).size()); - Assert.assertEquals(0, pps.getRolePrivilegesSize()); - - pps = getPPS(objectType, dbName, tableName, userNames[3]); - Assert.assertEquals(0, pps.getUserPrivilegesSize()); - Assert.assertEquals(0, pps.getRolePrivilegesSize()); - - // Test that removing role removes the role grants - store.removeRole(roleNames[1]); - checkRoleRemovedFromAllPrivileges(objectType, dbName, tableName, roleNames[1]); - pps = getPPS(objectType, dbName, tableName, userNames[0]); - - Assert.assertEquals(1, pps.getRolePrivilegesSize()); - Assert.assertEquals(1, pps.getRolePrivileges().get(roleNames[0]).size()); - - pps = getPPS(objectType, dbName, tableName, userNames[1]); - - Assert.assertEquals(0, pps.getRolePrivilegesSize()); - - // Test that revoking with grant option = true just removes grant option - privileges.clear(); - hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); - grantInfo = new PrivilegeGrantInfo("write", now, "me", PrincipalType.USER, true); - hop = new HiveObjectPrivilege(hiveObjRef, roleNames[0], PrincipalType.ROLE, grantInfo); - privileges.add(hop); - - hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); - grantInfo = new PrivilegeGrantInfo("create2", now, "me", PrincipalType.USER, true); - hop = new HiveObjectPrivilege(hiveObjRef, userNames[2], PrincipalType.USER, grantInfo); - privileges.add(hop); - - pBag = new PrivilegeBag(privileges); - store.revokePrivileges(pBag, true); - pps = getPPS(objectType, dbName, tableName, userNames[0]); - - Assert.assertEquals(1, pps.getRolePrivilegesSize()); - Assert.assertEquals(1, pps.getRolePrivileges().get(roleNames[0]).size()); - grantInfo = pps.getRolePrivileges().get(roleNames[0]).get(0); - Assert.assertEquals("write", grantInfo.getPrivilege()); - Assert.assertTrue(now <= grantInfo.getCreateTime()); - Assert.assertEquals("me", grantInfo.getGrantor()); - Assert.assertEquals(PrincipalType.USER, grantInfo.getGrantorType()); - Assert.assertFalse(grantInfo.isGrantOption()); - - pps = getPPS(objectType, dbName, tableName, userNames[2]); - - Assert.assertEquals(1, pps.getUserPrivilegesSize()); - Assert.assertEquals(2, pps.getUserPrivileges().get(userNames[2]).size()); - for (PrivilegeGrantInfo pgi : pps.getUserPrivileges().get(userNames[2])) { - if (pgi.getPrivilege().equals("create")) Assert.assertTrue(pgi.isGrantOption()); - else if (pgi.getPrivilege().equals("create2")) Assert.assertFalse(pgi.isGrantOption()); - else Assert.fail("huh?"); - } - - // 
Test revoking revokes - store.revokePrivileges(pBag, false); - - pps = getPPS(objectType, dbName, tableName, userNames[0]); - - Assert.assertEquals(1, pps.getUserPrivilegesSize()); - Assert.assertEquals(1, pps.getRolePrivilegesSize()); - Assert.assertEquals(0, pps.getRolePrivileges().get(roleNames[0]).size()); - - pps = getPPS(objectType, dbName, tableName, userNames[2]); - Assert.assertEquals(1, pps.getUserPrivilegesSize()); - Assert.assertEquals(1, pps.getUserPrivileges().get(userNames[2]).size()); - Assert.assertEquals("create", pps.getUserPrivileges().get(userNames[2]).get(0).getPrivilege()); - Assert.assertEquals(0, pps.getRolePrivilegesSize()); - } - - private PrincipalPrivilegeSet getPPS(HiveObjectType objectType, String dbName, String tableName, - String userName) - throws InvalidObjectException, MetaException { - switch (objectType) { - case GLOBAL: return store.getUserPrivilegeSet(userName, null); - case DATABASE: return store.getDBPrivilegeSet(dbName, userName, null); - case TABLE: return store.getTablePrivilegeSet(dbName, tableName, userName, null); - default: throw new RuntimeException("huh?"); - } - } - - private void checkRoleRemovedFromAllPrivileges(HiveObjectType objectType, String dbName, - String tableName, String roleName) - throws IOException, NoSuchObjectException, MetaException { - List pgi = null; - switch (objectType) { - case GLOBAL: - pgi = HBaseReadWrite.getInstance().getGlobalPrivs().getRolePrivileges().get(roleName); - break; - - case DATABASE: - pgi = store.getDatabase(dbName).getPrivileges().getRolePrivileges().get(roleName); - break; - - case TABLE: - pgi = store.getTable(dbName, tableName).getPrivileges().getRolePrivileges().get(roleName); - break; - - default: - Assert.fail(); - } - - Assert.assertNull("Expected null for role " + roleName + " for type " + objectType.toString() - + " with db " + dbName + " and table " + tableName, pgi); - } - - @Test - public void listDbGrants() throws Exception { - String dbNames[] = new String[] {"ldbg_db1", "ldbg_db2"}; - try { - Database db = new Database(dbNames[0], "no description", "file:///tmp", emptyParameters); - store.createDatabase(db); - db = new Database(dbNames[1], "no description", "file:///tmp", emptyParameters); - store.createDatabase(db); - String[] roleNames = new String[]{"ldbg_role1", "ldbg_role2"}; - String[] userNames = new String[]{"frodo", "sam"}; - - store.addRole(roleNames[0], "me"); - store.addRole(roleNames[1], "me"); - int now = (int)(System.currentTimeMillis() / 1000); - - Role role1 = store.getRole(roleNames[0]); - Role role2 = store.getRole(roleNames[1]); - store.grantRole(role1, userNames[0], PrincipalType.USER, "bob", PrincipalType.USER, false); - store.grantRole(role1, roleNames[1], PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); - store.grantRole(role2, userNames[1], PrincipalType.USER, "bob", PrincipalType.USER, false); - - List privileges = new ArrayList(); - HiveObjectRef hiveObjRef = - new HiveObjectRef(HiveObjectType.DATABASE, dbNames[0], null, null, null); - PrivilegeGrantInfo grantInfo = - new PrivilegeGrantInfo("read", now, "me", PrincipalType.USER, false); - HiveObjectPrivilege hop = new HiveObjectPrivilege(hiveObjRef, userNames[0], PrincipalType.USER, - grantInfo); - privileges.add(hop); - - grantInfo = new PrivilegeGrantInfo("write", now, "me", PrincipalType.USER, true); - hop = new HiveObjectPrivilege(hiveObjRef, roleNames[0], PrincipalType.ROLE, grantInfo); - privileges.add(hop); - - PrivilegeBag pBag = new PrivilegeBag(privileges); - store.grantPrivileges(pBag); - 
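The privilege objects in this test (and in doGrantRevoke above) are built positionally, which is hard to follow in flattened form. Below is a minimal sketch of what those positions mean, assuming the metastore Thrift constructors used elsewhere in this patch; GrantSketch and the literal values are illustrative only.

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
import org.apache.hadoop.hive.metastore.api.HiveObjectType;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;

// Sketch of how a single database-level "read" grant is assembled before
// being handed to store.grantPrivileges(PrivilegeBag).
public class GrantSketch {
  static PrivilegeBag exampleDbGrant(String dbName, String userName) {
    int now = (int) (System.currentTimeMillis() / 1000);
    // objectType, dbName, objectName, partValues, columnName
    HiveObjectRef dbRef = new HiveObjectRef(HiveObjectType.DATABASE, dbName, null, null, null);
    // privilege, createTime, grantor, grantorType, grantOption
    PrivilegeGrantInfo read = new PrivilegeGrantInfo("read", now, "me", PrincipalType.USER, false);
    // hiveObject, principalName, principalType, grantInfo
    HiveObjectPrivilege priv =
        new HiveObjectPrivilege(dbRef, userName, PrincipalType.USER, read);
    return new PrivilegeBag(Arrays.asList(priv));
  }
}

The assertions that follow read these grants back through listPrincipalDBGrants and listDBGrantsAll.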
- List hops = - store.listPrincipalDBGrants(roleNames[0], PrincipalType.ROLE, dbNames[0]); - Assert.assertEquals(1, hops.size()); - Assert.assertEquals(PrincipalType.ROLE, hops.get(0).getPrincipalType()); - Assert.assertEquals(HiveObjectType.DATABASE, hops.get(0).getHiveObject().getObjectType()); - Assert.assertEquals("write", hops.get(0).getGrantInfo().getPrivilege()); - - hops = store.listPrincipalDBGrants(userNames[0], PrincipalType.USER, dbNames[0]); - Assert.assertEquals(1, hops.size()); - Assert.assertEquals(PrincipalType.USER, hops.get(0).getPrincipalType()); - Assert.assertEquals(HiveObjectType.DATABASE, hops.get(0).getHiveObject().getObjectType()); - Assert.assertEquals("read", hops.get(0).getGrantInfo().getPrivilege()); - - hops = store.listPrincipalDBGrants(roleNames[1], PrincipalType.ROLE, dbNames[0]); - Assert.assertEquals(0, hops.size()); - hops = store.listPrincipalDBGrants(userNames[1], PrincipalType.USER, dbNames[0]); - Assert.assertEquals(0, hops.size()); - - hops = store.listPrincipalDBGrants(roleNames[0], PrincipalType.ROLE, dbNames[1]); - Assert.assertEquals(0, hops.size()); - hops = store.listPrincipalDBGrants(userNames[0], PrincipalType.USER, dbNames[1]); - Assert.assertEquals(0, hops.size()); - - hops = store.listDBGrantsAll(dbNames[0]); - Assert.assertEquals(2, hops.size()); - boolean sawUser = false, sawRole = false; - for (HiveObjectPrivilege h : hops) { - if (h.getPrincipalName().equals(userNames[0])) { - Assert.assertEquals(PrincipalType.USER, h.getPrincipalType()); - Assert.assertEquals(HiveObjectType.DATABASE, h.getHiveObject().getObjectType()); - Assert.assertEquals("read", h.getGrantInfo().getPrivilege()); - sawUser = true; - } else if (h.getPrincipalName().equals(roleNames[0])) { - Assert.assertEquals(PrincipalType.ROLE, h.getPrincipalType()); - Assert.assertEquals(HiveObjectType.DATABASE, h.getHiveObject().getObjectType()); - Assert.assertEquals("write", h.getGrantInfo().getPrivilege()); - sawRole = true; - } - } - Assert.assertTrue(sawUser && sawRole); - - hops = store.listPrincipalDBGrantsAll(roleNames[0], PrincipalType.ROLE); - Assert.assertEquals(1, hops.size()); - Assert.assertEquals(PrincipalType.ROLE, hops.get(0).getPrincipalType()); - Assert.assertEquals(HiveObjectType.DATABASE, hops.get(0).getHiveObject().getObjectType()); - Assert.assertEquals("write", hops.get(0).getGrantInfo().getPrivilege()); - - hops = store.listPrincipalDBGrantsAll(userNames[0], PrincipalType.USER); - Assert.assertEquals(1, hops.size()); - Assert.assertEquals(PrincipalType.USER, hops.get(0).getPrincipalType()); - Assert.assertEquals(HiveObjectType.DATABASE, hops.get(0).getHiveObject().getObjectType()); - Assert.assertEquals("read", hops.get(0).getGrantInfo().getPrivilege()); - - hops = store.listPrincipalDBGrantsAll(roleNames[1], PrincipalType.ROLE); - Assert.assertEquals(0, hops.size()); - hops = store.listPrincipalDBGrantsAll(userNames[1], PrincipalType.USER); - Assert.assertEquals(0, hops.size()); - - - } finally { - store.dropDatabase(dbNames[0]); - store.dropDatabase(dbNames[1]); - } - } - - @Test - public void listGlobalGrants() throws Exception { - String[] roleNames = new String[]{"lgg_role1", "lgg_role2"}; - String[] userNames = new String[]{"merry", "pippen"}; - - store.addRole(roleNames[0], "me"); - store.addRole(roleNames[1], "me"); - int now = (int)(System.currentTimeMillis() / 1000); - - Role role1 = store.getRole(roleNames[0]); - Role role2 = store.getRole(roleNames[1]); - store.grantRole(role1, userNames[0], PrincipalType.USER, "bob", PrincipalType.USER, 
false); - store.grantRole(role1, roleNames[1], PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); - store.grantRole(role2, userNames[1], PrincipalType.USER, "bob", PrincipalType.USER, false); - - List privileges = new ArrayList(); - HiveObjectRef hiveObjRef = - new HiveObjectRef(HiveObjectType.GLOBAL, null, null, null, null); - PrivilegeGrantInfo grantInfo = - new PrivilegeGrantInfo("read", now, "me", PrincipalType.USER, false); - HiveObjectPrivilege hop = new HiveObjectPrivilege(hiveObjRef, userNames[0], PrincipalType.USER, - grantInfo); - privileges.add(hop); - - grantInfo = new PrivilegeGrantInfo("write", now, "me", PrincipalType.USER, true); - hop = new HiveObjectPrivilege(hiveObjRef, roleNames[0], PrincipalType.ROLE, grantInfo); - privileges.add(hop); - - PrivilegeBag pBag = new PrivilegeBag(privileges); - store.grantPrivileges(pBag); - - List hops = - store.listPrincipalGlobalGrants(roleNames[0], PrincipalType.ROLE); - Assert.assertEquals(1, hops.size()); - Assert.assertEquals(PrincipalType.ROLE, hops.get(0).getPrincipalType()); - Assert.assertEquals(HiveObjectType.GLOBAL, hops.get(0).getHiveObject().getObjectType()); - Assert.assertEquals("write", hops.get(0).getGrantInfo().getPrivilege()); - - hops = store.listPrincipalGlobalGrants(userNames[0], PrincipalType.USER); - Assert.assertEquals(1, hops.size()); - Assert.assertEquals(PrincipalType.USER, hops.get(0).getPrincipalType()); - Assert.assertEquals(HiveObjectType.GLOBAL, hops.get(0).getHiveObject().getObjectType()); - Assert.assertEquals("read", hops.get(0).getGrantInfo().getPrivilege()); - - hops = store.listPrincipalGlobalGrants(roleNames[1], PrincipalType.ROLE); - Assert.assertEquals(0, hops.size()); - hops = store.listPrincipalGlobalGrants(userNames[1], PrincipalType.USER); - Assert.assertEquals(0, hops.size()); - - hops = store.listGlobalGrantsAll(); - Assert.assertEquals(2, hops.size()); - boolean sawUser = false, sawRole = false; - for (HiveObjectPrivilege h : hops) { - if (h.getPrincipalName().equals(userNames[0])) { - Assert.assertEquals(PrincipalType.USER, h.getPrincipalType()); - Assert.assertEquals(HiveObjectType.GLOBAL, h.getHiveObject().getObjectType()); - Assert.assertEquals("read", h.getGrantInfo().getPrivilege()); - sawUser = true; - } else if (h.getPrincipalName().equals(roleNames[0])) { - Assert.assertEquals(PrincipalType.ROLE, h.getPrincipalType()); - Assert.assertEquals(HiveObjectType.GLOBAL, h.getHiveObject().getObjectType()); - Assert.assertEquals("write", h.getGrantInfo().getPrivilege()); - sawRole = true; - } - } - Assert.assertTrue(sawUser && sawRole); - } - - @Test - public void listTableGrants() throws Exception { - String dbName = "ltg_db"; - String[] tableNames = new String[] {"ltg_t1", "ltg_t2"}; - try { - Database db = new Database(dbName, "no description", "file:///tmp", emptyParameters); - store.createDatabase(db); - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - Table table = new Table(tableNames[0], dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - table = new Table(tableNames[1], dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - String[] roleNames = new 
String[]{"ltg_role1", "ltg_role2"}; - String[] userNames = new String[]{"gandalf", "radagast"}; - - store.addRole(roleNames[0], "me"); - store.addRole(roleNames[1], "me"); - int now = (int)(System.currentTimeMillis() / 1000); - - Role role1 = store.getRole(roleNames[0]); - Role role2 = store.getRole(roleNames[1]); - store.grantRole(role1, userNames[0], PrincipalType.USER, "bob", PrincipalType.USER, false); - store.grantRole(role1, roleNames[1], PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); - store.grantRole(role2, userNames[1], PrincipalType.USER, "bob", PrincipalType.USER, false); - - List privileges = new ArrayList(); - HiveObjectRef hiveObjRef = - new HiveObjectRef(HiveObjectType.TABLE, dbName, tableNames[0], null, null); - PrivilegeGrantInfo grantInfo = - new PrivilegeGrantInfo("read", now, "me", PrincipalType.USER, false); - HiveObjectPrivilege hop = new HiveObjectPrivilege(hiveObjRef, userNames[0], PrincipalType.USER, - grantInfo); - privileges.add(hop); - - grantInfo = new PrivilegeGrantInfo("write", now, "me", PrincipalType.USER, true); - hop = new HiveObjectPrivilege(hiveObjRef, roleNames[0], PrincipalType.ROLE, grantInfo); - privileges.add(hop); - - PrivilegeBag pBag = new PrivilegeBag(privileges); - store.grantPrivileges(pBag); - - List hops = - store.listAllTableGrants(roleNames[0], PrincipalType.ROLE, dbName, tableNames[0]); - Assert.assertEquals(1, hops.size()); - Assert.assertEquals(PrincipalType.ROLE, hops.get(0).getPrincipalType()); - Assert.assertEquals(HiveObjectType.TABLE, hops.get(0).getHiveObject().getObjectType()); - Assert.assertEquals("write", hops.get(0).getGrantInfo().getPrivilege()); - - hops = store.listAllTableGrants(userNames[0], PrincipalType.USER, dbName, tableNames[0]); - Assert.assertEquals(1, hops.size()); - Assert.assertEquals(PrincipalType.USER, hops.get(0).getPrincipalType()); - Assert.assertEquals(HiveObjectType.TABLE, hops.get(0).getHiveObject().getObjectType()); - Assert.assertEquals("read", hops.get(0).getGrantInfo().getPrivilege()); - - hops = store.listAllTableGrants(roleNames[1], PrincipalType.ROLE, dbName, tableNames[0]); - Assert.assertEquals(0, hops.size()); - hops = store.listAllTableGrants(userNames[1], PrincipalType.USER, dbName, tableNames[0]); - Assert.assertEquals(0, hops.size()); - - hops = store.listAllTableGrants(roleNames[0], PrincipalType.ROLE, dbName, tableNames[1]); - Assert.assertEquals(0, hops.size()); - hops = store.listAllTableGrants(userNames[0], PrincipalType.USER, dbName, tableNames[1]); - Assert.assertEquals(0, hops.size()); - - hops = store.listTableGrantsAll(dbName, tableNames[0]); - Assert.assertEquals(2, hops.size()); - boolean sawUser = false, sawRole = false; - for (HiveObjectPrivilege h : hops) { - if (h.getPrincipalName().equals(userNames[0])) { - Assert.assertEquals(PrincipalType.USER, h.getPrincipalType()); - Assert.assertEquals(HiveObjectType.TABLE, h.getHiveObject().getObjectType()); - Assert.assertEquals("read", h.getGrantInfo().getPrivilege()); - sawUser = true; - } else if (h.getPrincipalName().equals(roleNames[0])) { - Assert.assertEquals(PrincipalType.ROLE, h.getPrincipalType()); - Assert.assertEquals(HiveObjectType.TABLE, h.getHiveObject().getObjectType()); - Assert.assertEquals("write", h.getGrantInfo().getPrivilege()); - sawRole = true; - } - } - Assert.assertTrue(sawUser && sawRole); - - hops = store.listPrincipalTableGrantsAll(roleNames[0], PrincipalType.ROLE); - Assert.assertEquals(1, hops.size()); - Assert.assertEquals(PrincipalType.ROLE, hops.get(0).getPrincipalType()); - 
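Aside: listDbGrants, listTableGrants and the statistics tests below all create their fixture tables with the same boilerplate of FieldSchema, SerDeInfo, StorageDescriptor and Table objects handed to RawStore.createTable. A condensed sketch of that setup (the helper name and the literal locations and formats are illustrative only):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

public final class TableFixtureSketch {
  // Create a minimal unpartitioned table with a single int column.
  public static Table createSimpleTable(RawStore store, String dbName, String tableName)
      throws Exception {
    int now = (int) (System.currentTimeMillis() / 1000);
    Map<String, String> params = new HashMap<>();
    List<FieldSchema> cols = new ArrayList<>();
    cols.add(new FieldSchema("col1", "int", "nocomment"));
    // Serde name, serialization library, serde parameters.
    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
    // Columns, location, input/output formats, compressed flag, numBuckets,
    // serde, bucketCols, sortCols, storage parameters.
    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output",
        false, 0, serde, null, null, params);
    // Name, db, owner, createTime, lastAccessTime, retention, sd, partitionKeys,
    // table parameters, view original/expanded text, table type.
    Table table = new Table(tableName, dbName, "me", now, now, 0, sd, null,
        params, null, null, null);
    store.createTable(table);
    return table;
  }
}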
Assert.assertEquals(HiveObjectType.TABLE, hops.get(0).getHiveObject().getObjectType()); - Assert.assertEquals("write", hops.get(0).getGrantInfo().getPrivilege()); - - hops = store.listPrincipalTableGrantsAll(userNames[0], PrincipalType.USER); - Assert.assertEquals(1, hops.size()); - Assert.assertEquals(PrincipalType.USER, hops.get(0).getPrincipalType()); - Assert.assertEquals(HiveObjectType.TABLE, hops.get(0).getHiveObject().getObjectType()); - Assert.assertEquals("read", hops.get(0).getGrantInfo().getPrivilege()); - - hops = store.listPrincipalDBGrantsAll(roleNames[1], PrincipalType.ROLE); - Assert.assertEquals(0, hops.size()); - hops = store.listPrincipalDBGrantsAll(userNames[1], PrincipalType.USER); - Assert.assertEquals(0, hops.size()); - - - } finally { - store.dropTable(dbName, tableNames[0]); - store.dropTable(dbName, tableNames[1]); - store.dropDatabase(dbName); - } - } - - @Test - public void tableStatistics() throws Exception { - long now = System.currentTimeMillis(); - String dbname = "default"; - String tableName = "statstable"; - String boolcol = "boolcol"; - String longcol = "longcol"; - String doublecol = "doublecol"; - String stringcol = "stringcol"; - String binarycol = "bincol"; - String decimalcol = "deccol"; - long trues = 37; - long falses = 12; - long booleanNulls = 2; - long longHigh = 120938479124L; - long longLow = -12341243213412124L; - long longNulls = 23; - long longDVs = 213L; - double doubleHigh = 123423.23423; - double doubleLow = 0.00001234233; - long doubleNulls = 92; - long doubleDVs = 1234123421L; - long strMaxLen = 1234; - double strAvgLen = 32.3; - long strNulls = 987; - long strDVs = 906; - long binMaxLen = 123412987L; - double binAvgLen = 76.98; - long binNulls = 976998797L; - Decimal decHigh = new Decimal(); - decHigh.setScale((short)3); - decHigh.setUnscaled("3876".getBytes()); // I have no clue how this is translated, but it - // doesn't matter - Decimal decLow = new Decimal(); - decLow.setScale((short)3); - decLow.setUnscaled("38".getBytes()); - long decNulls = 13; - long decDVs = 923947293L; - - List cols = new ArrayList(); - cols.add(new FieldSchema(boolcol, "boolean", "nocomment")); - cols.add(new FieldSchema(longcol, "long", "nocomment")); - cols.add(new FieldSchema(doublecol, "double", "nocomment")); - cols.add(new FieldSchema(stringcol, "varchar(32)", "nocomment")); - cols.add(new FieldSchema(binarycol, "binary", "nocomment")); - cols.add(new FieldSchema(decimalcol, "decimal(5, 3)", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - Table table = new Table(tableName, dbname, "me", (int)now / 1000, (int)now / 1000, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - ColumnStatistics stats = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); - desc.setLastAnalyzed(now); - desc.setDbName(dbname); - desc.setTableName(tableName); - desc.setIsTblLevel(true); - stats.setStatsDesc(desc); - - // Do one column of each type - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName(boolcol); - obj.setColType("boolean"); - ColumnStatisticsData data = new ColumnStatisticsData(); - BooleanColumnStatsData boolData = new BooleanColumnStatsData(); - boolData.setNumTrues(trues); - boolData.setNumFalses(falses); - boolData.setNumNulls(booleanNulls); - data.setBooleanStats(boolData); - obj.setStatsData(data); - 
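The decimal bounds in tableStatistics are built with Decimal.setScale and setUnscaled, and the inline comment admits the author was unsure how the bytes are interpreted. Under the usual reading of the Thrift struct, the unscaled field carries the two's-complement bytes of a BigDecimal's unscaled value, so the conversion would look roughly like the sketch below; this is a hedged illustration of that interpretation, not code from the removed patch, and the test itself only relies on the bytes round-tripping unchanged.

import java.math.BigDecimal;
import java.math.BigInteger;

import org.apache.hadoop.hive.metastore.api.Decimal;

public final class DecimalSketch {
  // Encode a java.math.BigDecimal as a metastore Decimal: the unscaled value's
  // two's-complement bytes plus the scale.
  public static Decimal toThriftDecimal(BigDecimal value) {
    Decimal d = new Decimal();
    d.setScale((short) value.scale());
    d.setUnscaled(value.unscaledValue().toByteArray());
    return d;
  }

  // Decode it back into a BigDecimal.
  public static BigDecimal fromThriftDecimal(Decimal d) {
    return new BigDecimal(new BigInteger(d.getUnscaled()), d.getScale());
  }
}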
stats.addToStatsObj(obj); - - obj = new ColumnStatisticsObj(); - obj.setColName(longcol); - obj.setColType("long"); - data = new ColumnStatisticsData(); - LongColumnStatsData longData = new LongColumnStatsData(); - longData.setHighValue(longHigh); - longData.setLowValue(longLow); - longData.setNumNulls(longNulls); - longData.setNumDVs(longDVs); - data.setLongStats(longData); - obj.setStatsData(data); - stats.addToStatsObj(obj); - - obj = new ColumnStatisticsObj(); - obj.setColName(doublecol); - obj.setColType("double"); - data = new ColumnStatisticsData(); - DoubleColumnStatsData doubleData = new DoubleColumnStatsData(); - doubleData.setHighValue(doubleHigh); - doubleData.setLowValue(doubleLow); - doubleData.setNumNulls(doubleNulls); - doubleData.setNumDVs(doubleDVs); - data.setDoubleStats(doubleData); - obj.setStatsData(data); - stats.addToStatsObj(obj); - - store.updateTableColumnStatistics(stats); - - stats = store.getTableColumnStatistics(dbname, tableName, - Arrays.asList(boolcol, longcol, doublecol)); - - // We'll check all of the individual values later. - Assert.assertEquals(3, stats.getStatsObjSize()); - - // check that we can fetch just some of the columns - stats = store.getTableColumnStatistics(dbname, tableName, Arrays.asList(boolcol)); - Assert.assertEquals(1, stats.getStatsObjSize()); - - stats = new ColumnStatistics(); - stats.setStatsDesc(desc); - - - obj = new ColumnStatisticsObj(); - obj.setColName(stringcol); - obj.setColType("string"); - data = new ColumnStatisticsData(); - StringColumnStatsData strData = new StringColumnStatsData(); - strData.setMaxColLen(strMaxLen); - strData.setAvgColLen(strAvgLen); - strData.setNumNulls(strNulls); - strData.setNumDVs(strDVs); - data.setStringStats(strData); - obj.setStatsData(data); - stats.addToStatsObj(obj); - - obj = new ColumnStatisticsObj(); - obj.setColName(binarycol); - obj.setColType("binary"); - data = new ColumnStatisticsData(); - BinaryColumnStatsData binData = new BinaryColumnStatsData(); - binData.setMaxColLen(binMaxLen); - binData.setAvgColLen(binAvgLen); - binData.setNumNulls(binNulls); - data.setBinaryStats(binData); - obj.setStatsData(data); - stats.addToStatsObj(obj); - - obj = new ColumnStatisticsObj(); - obj.setColName(decimalcol); - obj.setColType("decimal(5,3)"); - data = new ColumnStatisticsData(); - DecimalColumnStatsData decData = new DecimalColumnStatsData(); - LOG.debug("Setting decimal high value to " + decHigh.getScale() + " <" + new String(decHigh.getUnscaled()) + ">"); - decData.setHighValue(decHigh); - decData.setLowValue(decLow); - decData.setNumNulls(decNulls); - decData.setNumDVs(decDVs); - data.setDecimalStats(decData); - obj.setStatsData(data); - stats.addToStatsObj(obj); - - store.updateTableColumnStatistics(stats); - - stats = store.getTableColumnStatistics(dbname, tableName, - Arrays.asList(boolcol, longcol, doublecol, stringcol, binarycol, decimalcol)); - Assert.assertEquals(now, stats.getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(dbname, stats.getStatsDesc().getDbName()); - Assert.assertEquals(tableName, stats.getStatsDesc().getTableName()); - Assert.assertTrue(stats.getStatsDesc().isIsTblLevel()); - - Assert.assertEquals(6, stats.getStatsObjSize()); - - ColumnStatisticsData colData = stats.getStatsObj().get(0).getStatsData(); - Assert.assertEquals(ColumnStatisticsData._Fields.BOOLEAN_STATS, colData.getSetField()); - boolData = colData.getBooleanStats(); - Assert.assertEquals(trues, boolData.getNumTrues()); - Assert.assertEquals(falses, boolData.getNumFalses()); - 
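For reference, the whole table-level statistics round trip exercised here fits in a few lines: describe the target table, attach one ColumnStatisticsObj per column (each wrapping the ColumnStatisticsData union), write with updateTableColumnStatistics, then read back with getTableColumnStatistics and dispatch on getSetField() to see which branch of the union is populated. A condensed, commented sketch of that flow (class, variable names and literal values are illustrative):

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;

public final class TableStatsSketch {
  public static void roundTrip(RawStore store, String dbName, String tableName, String colName)
      throws Exception {
    // Describe which table the statistics belong to (table-level, not partition-level).
    ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
    desc.setDbName(dbName);
    desc.setTableName(tableName);
    desc.setIsTblLevel(true);
    desc.setLastAnalyzed(System.currentTimeMillis());

    // One stats object for an integer-typed column: bounds, null count, distinct count.
    LongColumnStatsData longData = new LongColumnStatsData();
    longData.setHighValue(100L);
    longData.setLowValue(-100L);
    longData.setNumNulls(3);
    longData.setNumDVs(50);
    ColumnStatisticsData data = new ColumnStatisticsData();
    data.setLongStats(longData);

    ColumnStatisticsObj obj = new ColumnStatisticsObj();
    obj.setColName(colName);
    obj.setColType("bigint");   // should match the column's declared type
    obj.setStatsData(data);

    ColumnStatistics stats = new ColumnStatistics();
    stats.setStatsDesc(desc);
    stats.addToStatsObj(obj);
    store.updateTableColumnStatistics(stats);

    // Read back only the requested column and check which union branch is set.
    ColumnStatistics fetched =
        store.getTableColumnStatistics(dbName, tableName, Arrays.asList(colName));
    ColumnStatisticsData fetchedData = fetched.getStatsObj().get(0).getStatsData();
    if (fetchedData.getSetField() == ColumnStatisticsData._Fields.LONG_STATS) {
      System.out.println(colName + " has ~" + fetchedData.getLongStats().getNumDVs()
          + " distinct values");
    }
  }
}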
Assert.assertEquals(booleanNulls, boolData.getNumNulls()); - - colData = stats.getStatsObj().get(1).getStatsData(); - Assert.assertEquals(ColumnStatisticsData._Fields.LONG_STATS, colData.getSetField()); - longData = colData.getLongStats(); - Assert.assertEquals(longHigh, longData.getHighValue()); - Assert.assertEquals(longLow, longData.getLowValue()); - Assert.assertEquals(longNulls, longData.getNumNulls()); - Assert.assertEquals(longDVs, longData.getNumDVs()); - - colData = stats.getStatsObj().get(2).getStatsData(); - Assert.assertEquals(ColumnStatisticsData._Fields.DOUBLE_STATS, colData.getSetField()); - doubleData = colData.getDoubleStats(); - Assert.assertEquals(doubleHigh, doubleData.getHighValue(), 0.01); - Assert.assertEquals(doubleLow, doubleData.getLowValue(), 0.01); - Assert.assertEquals(doubleNulls, doubleData.getNumNulls()); - Assert.assertEquals(doubleDVs, doubleData.getNumDVs()); - - colData = stats.getStatsObj().get(3).getStatsData(); - Assert.assertEquals(ColumnStatisticsData._Fields.STRING_STATS, colData.getSetField()); - strData = colData.getStringStats(); - Assert.assertEquals(strMaxLen, strData.getMaxColLen()); - Assert.assertEquals(strAvgLen, strData.getAvgColLen(), 0.01); - Assert.assertEquals(strNulls, strData.getNumNulls()); - Assert.assertEquals(strDVs, strData.getNumDVs()); - - colData = stats.getStatsObj().get(4).getStatsData(); - Assert.assertEquals(ColumnStatisticsData._Fields.BINARY_STATS, colData.getSetField()); - binData = colData.getBinaryStats(); - Assert.assertEquals(binMaxLen, binData.getMaxColLen()); - Assert.assertEquals(binAvgLen, binData.getAvgColLen(), 0.01); - Assert.assertEquals(binNulls, binData.getNumNulls()); - - colData = stats.getStatsObj().get(5).getStatsData(); - Assert.assertEquals(ColumnStatisticsData._Fields.DECIMAL_STATS, colData.getSetField()); - decData = colData.getDecimalStats(); - Assert.assertEquals(decHigh, decData.getHighValue()); - Assert.assertEquals(decLow, decData.getLowValue()); - Assert.assertEquals(decNulls, decData.getNumNulls()); - Assert.assertEquals(decDVs, decData.getNumDVs()); - - } - - @Test - public void partitionStatistics() throws Exception { - long now = System.currentTimeMillis(); - String dbname = "default"; - String tableName = "statspart"; - String[] partNames = {"ds=today", "ds=yesterday"}; - String[] partVals = {"today", "yesterday"}; - String boolcol = "boolcol"; - String longcol = "longcol"; - String doublecol = "doublecol"; - String stringcol = "stringcol"; - String binarycol = "bincol"; - String decimalcol = "deccol"; - long trues = 37; - long falses = 12; - long booleanNulls = 2; - long strMaxLen = 1234; - double strAvgLen = 32.3; - long strNulls = 987; - long strDVs = 906; - - List cols = new ArrayList(); - cols.add(new FieldSchema(boolcol, "boolean", "nocomment")); - cols.add(new FieldSchema(longcol, "long", "nocomment")); - cols.add(new FieldSchema(doublecol, "double", "nocomment")); - cols.add(new FieldSchema(stringcol, "varchar(32)", "nocomment")); - cols.add(new FieldSchema(binarycol, "binary", "nocomment")); - cols.add(new FieldSchema(decimalcol, "decimal(5, 3)", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbname, "me", (int)now / 1000, (int)now / 1000, 0, sd, partCols, - emptyParameters, null, null, 
null); - store.createTable(table); - for (String partVal : partVals) { - Partition part = new Partition(Arrays.asList(partVal), dbname, tableName, (int) now / 1000, - (int) now / 1000, sd, emptyParameters); - store.addPartition(part); - } - - for (int i = 0; i < partNames.length; i++) { - ColumnStatistics stats = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); - desc.setLastAnalyzed(now); - desc.setDbName(dbname); - desc.setTableName(tableName); - desc.setIsTblLevel(false); - desc.setPartName(partNames[i]); - stats.setStatsDesc(desc); - - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName(boolcol); - obj.setColType("boolean"); - ColumnStatisticsData data = new ColumnStatisticsData(); - BooleanColumnStatsData boolData = new BooleanColumnStatsData(); - boolData.setNumTrues(trues); - boolData.setNumFalses(falses); - boolData.setNumNulls(booleanNulls); - data.setBooleanStats(boolData); - obj.setStatsData(data); - stats.addToStatsObj(obj); - - store.updatePartitionColumnStatistics(stats, Arrays.asList(partVals[i])); - } - - List statsList = store.getPartitionColumnStatistics(dbname, tableName, - Arrays.asList(partNames), Arrays.asList(boolcol)); - - Assert.assertEquals(2, statsList.size()); - for (int i = 0; i < partNames.length; i++) { - Assert.assertEquals(1, statsList.get(i).getStatsObjSize()); - } - - for (int i = 0; i < partNames.length; i++) { - ColumnStatistics stats = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); - desc.setLastAnalyzed(now); - desc.setDbName(dbname); - desc.setTableName(tableName); - desc.setIsTblLevel(false); - desc.setPartName(partNames[i]); - stats.setStatsDesc(desc); - - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName(stringcol); - obj.setColType("string"); - ColumnStatisticsData data = new ColumnStatisticsData(); - StringColumnStatsData strData = new StringColumnStatsData(); - strData.setMaxColLen(strMaxLen); - strData.setAvgColLen(strAvgLen); - strData.setNumNulls(strNulls); - strData.setNumDVs(strDVs); - data.setStringStats(strData); - obj.setStatsData(data); - stats.addToStatsObj(obj); - - store.updatePartitionColumnStatistics(stats, Arrays.asList(partVals[i])); - } - - // Make sure when we ask for one we only get one - statsList = store.getPartitionColumnStatistics(dbname, tableName, - Arrays.asList(partNames), Arrays.asList(boolcol)); - - Assert.assertEquals(2, statsList.size()); - for (int i = 0; i < partNames.length; i++) { - Assert.assertEquals(1, statsList.get(i).getStatsObjSize()); - } - - statsList = store.getPartitionColumnStatistics(dbname, tableName, - Arrays.asList(partNames), Arrays.asList(boolcol, stringcol)); - - Assert.assertEquals(2, statsList.size()); - for (int i = 0; i < partNames.length; i++) { - Assert.assertEquals(2, statsList.get(i).getStatsObjSize()); - // Just check one piece of the data, I don't need to check it all again - Assert.assertEquals(booleanNulls, - statsList.get(i).getStatsObj().get(0).getStatsData().getBooleanStats().getNumNulls()); - Assert.assertEquals(strDVs, - statsList.get(i).getStatsObj().get(1).getStatsData().getStringStats().getNumDVs()); - } - } - - @Test - public void delegationToken() throws Exception { - store.addToken("abc", "def"); - store.addToken("ghi", "jkl"); - - Assert.assertEquals("def", store.getToken("abc")); - Assert.assertEquals("jkl", store.getToken("ghi")); - Assert.assertNull(store.getToken("wabawaba")); - String[] allToks = store.getAllTokenIdentifiers().toArray(new String[2]); - 
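Partition statistics, as exercised in the partitionStatistics test above, follow the same shape as the table-level case except that the descriptor carries a partition name, writes go through updatePartitionColumnStatistics with the partition's value list, and reads can cover several partitions and columns in one call. A commented sketch of that flow (class name, partition name and literal values are illustrative):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

public final class PartitionStatsSketch {
  // Write boolean column stats for one partition and read them back.
  public static void partitionRoundTrip(RawStore store, String dbName, String tableName)
      throws Exception {
    String partName = "ds=today";                     // partition name
    List<String> partVals = Arrays.asList("today");   // matching partition values

    ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
    desc.setDbName(dbName);
    desc.setTableName(tableName);
    desc.setIsTblLevel(false);   // partition-level, so a partition name is required
    desc.setPartName(partName);
    desc.setLastAnalyzed(System.currentTimeMillis());

    BooleanColumnStatsData boolData = new BooleanColumnStatsData();
    boolData.setNumTrues(37);
    boolData.setNumFalses(12);
    boolData.setNumNulls(2);
    ColumnStatisticsData data = new ColumnStatisticsData();
    data.setBooleanStats(boolData);

    ColumnStatisticsObj obj = new ColumnStatisticsObj();
    obj.setColName("boolcol");
    obj.setColType("boolean");
    obj.setStatsData(data);

    ColumnStatistics stats = new ColumnStatistics();
    stats.setStatsDesc(desc);
    stats.addToStatsObj(obj);

    // Stats are written one partition at a time, keyed by the partition's values...
    store.updatePartitionColumnStatistics(stats, partVals);

    // ...but can be fetched for many partitions and columns at once.
    List<ColumnStatistics> fetched = store.getPartitionColumnStatistics(
        dbName, tableName, Arrays.asList(partName), Arrays.asList("boolcol"));
    System.out.println("partitions with stats: " + fetched.size());
  }
}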
Arrays.sort(allToks); - Assert.assertArrayEquals(new String[]{"abc", "ghi"}, allToks); - - store.removeToken("abc"); - store.removeToken("wabawaba"); - - Assert.assertNull(store.getToken("abc")); - Assert.assertEquals("jkl", store.getToken("ghi")); - allToks = store.getAllTokenIdentifiers().toArray(new String[1]); - Assert.assertArrayEquals(new String[]{"ghi"}, allToks); - } - - @Test - public void masterKey() throws Exception { - Assert.assertEquals(0, store.addMasterKey("k1")); - Assert.assertEquals(1, store.addMasterKey("k2")); - - String[] keys = store.getMasterKeys(); - Arrays.sort(keys); - Assert.assertArrayEquals(new String[]{"k1", "k2"}, keys); - - store.updateMasterKey(0, "k3"); - keys = store.getMasterKeys(); - Arrays.sort(keys); - Assert.assertArrayEquals(new String[]{"k2", "k3"}, keys); - - store.removeMasterKey(1); - keys = store.getMasterKeys(); - Assert.assertArrayEquals(new String[]{"k3"}, keys); - - thrown.expect(NoSuchObjectException.class); - store.updateMasterKey(72, "whatever"); - } - -} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java deleted file mode 100644 index c29e46a..0000000 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java +++ /dev/null @@ -1,191 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import java.io.IOException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * Integration tests with HBase Mini-cluster for HBaseStore - */ -public class TestStorageDescriptorSharing extends HBaseIntegrationTests { - - private static final Logger LOG = LoggerFactory.getLogger(TestHBaseStoreIntegration.class.getName()); - - private MessageDigest md; - - @BeforeClass - public static void startup() throws Exception { - HBaseIntegrationTests.startMiniCluster(); - } - - @AfterClass - public static void shutdown() throws Exception { - HBaseIntegrationTests.shutdownMiniCluster(); - } - - @Before - public void setup() throws IOException { - setupConnection(); - setupHBaseStore(); - try { - md = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } - } - - @Test - public void createManyPartitions() throws Exception { - String dbName = "default"; - String tableName = "manyParts"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); - for (String val : partVals) { - List vals = new ArrayList(); - vals.add(val); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=" + val); - Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - - Partition p = store.getPartition(dbName, tableName, vals); - Assert.assertEquals("file:/tmp/pc=" + val, p.getSd().getLocation()); - } - - Assert.assertEquals(1, HBaseReadWrite.getInstance().countStorageDescriptor()); - - String tableName2 = "differentTable"; - sd = new StorageDescriptor(cols, "file:/tmp", "input2", "output", false, 0, - serde, null, null, emptyParameters); - table = new Table(tableName2, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - Assert.assertEquals(2, HBaseReadWrite.getInstance().countStorageDescriptor()); - - // Drop one of the partitions and make sure it doesn't drop the storage descriptor - store.dropPartition(dbName, tableName, Arrays.asList(partVals.get(0))); - Assert.assertEquals(2, HBaseReadWrite.getInstance().countStorageDescriptor()); - - // Alter the second table in a few ways to make sure it changes it's descriptor properly - table = 
store.getTable(dbName, tableName2); - byte[] sdHash = HBaseUtils.hashStorageDescriptor(table.getSd(), md); - - // Alter the table without touching the storage descriptor - table.setLastAccessTime(startTime + 1); - store.alterTable(dbName, tableName2, table); - Assert.assertEquals(2, HBaseReadWrite.getInstance().countStorageDescriptor()); - table = store.getTable(dbName, tableName2); - byte[] alteredHash = HBaseUtils.hashStorageDescriptor(table.getSd(), md); - Assert.assertArrayEquals(sdHash, alteredHash); - - // Alter the table, changing the storage descriptor - table.getSd().setOutputFormat("output_changed"); - store.alterTable(dbName, tableName2, table); - Assert.assertEquals(2, HBaseReadWrite.getInstance().countStorageDescriptor()); - table = store.getTable(dbName, tableName2); - alteredHash = HBaseUtils.hashStorageDescriptor(table.getSd(), md); - Assert.assertFalse(Arrays.equals(sdHash, alteredHash)); - - // Alter one of the partitions without touching the storage descriptor - Partition part = store.getPartition(dbName, tableName, Arrays.asList(partVals.get(1))); - sdHash = HBaseUtils.hashStorageDescriptor(part.getSd(), md); - part.setLastAccessTime(part.getLastAccessTime() + 1); - store.alterPartition(dbName, tableName, Arrays.asList(partVals.get(1)), part); - Assert.assertEquals(2, HBaseReadWrite.getInstance().countStorageDescriptor()); - part = store.getPartition(dbName, tableName, Arrays.asList(partVals.get(1))); - alteredHash = HBaseUtils.hashStorageDescriptor(part.getSd(), md); - Assert.assertArrayEquals(sdHash, alteredHash); - - // Alter the partition, changing the storage descriptor - part.getSd().setOutputFormat("output_changed_some_more"); - store.alterPartition(dbName, tableName, Arrays.asList(partVals.get(1)), part); - Assert.assertEquals(3, HBaseReadWrite.getInstance().countStorageDescriptor()); - part = store.getPartition(dbName, tableName, Arrays.asList(partVals.get(1))); - alteredHash = HBaseUtils.hashStorageDescriptor(part.getSd(), md); - Assert.assertFalse(Arrays.equals(sdHash, alteredHash)); - - // Alter multiple partitions without touching the storage descriptors - List parts = store.getPartitions(dbName, tableName, -1); - sdHash = HBaseUtils.hashStorageDescriptor(parts.get(1).getSd(), md); - for (int i = 1; i < 3; i++) { - parts.get(i).setLastAccessTime(97); - } - List> listPartVals = new ArrayList>(); - for (String pv : partVals.subList(1, partVals.size())) { - listPartVals.add(Arrays.asList(pv)); - } - store.alterPartitions(dbName, tableName, listPartVals, parts); - Assert.assertEquals(3, HBaseReadWrite.getInstance().countStorageDescriptor()); - parts = store.getPartitions(dbName, tableName, -1); - alteredHash = HBaseUtils.hashStorageDescriptor(parts.get(1).getSd(), md); - Assert.assertArrayEquals(sdHash, alteredHash); - - // Alter multiple partitions changning the storage descriptors - parts = store.getPartitions(dbName, tableName, -1); - sdHash = HBaseUtils.hashStorageDescriptor(parts.get(1).getSd(), md); - for (int i = 1; i < 3; i++) { - parts.get(i).getSd().setOutputFormat("yet_a_different_of"); - } - store.alterPartitions(dbName, tableName, listPartVals, parts); - Assert.assertEquals(4, HBaseReadWrite.getInstance().countStorageDescriptor()); - parts = store.getPartitions(dbName, tableName, -1); - alteredHash = HBaseUtils.hashStorageDescriptor(parts.get(1).getSd(), md); - Assert.assertFalse(Arrays.equals(sdHash, alteredHash)); - - for (String partVal : partVals.subList(1, partVals.size())) { - store.dropPartition(dbName, tableName, Arrays.asList(partVal)); - 
} - store.dropTable(dbName, tableName); - store.dropTable(dbName, tableName2); - - Assert.assertEquals(0, HBaseReadWrite.getInstance().countStorageDescriptor()); - - - } -} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java index c17ca10..dd2be96 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java @@ -90,7 +90,7 @@ public CheckResults(String outDir, String logDir, MiniClusterType miniMr, String hadoopVer, String locationSubdir) throws Exception { - super(outDir, logDir, miniMr, null, hadoopVer, "", "", false, false); + super(outDir, logDir, miniMr, null, hadoopVer, "", "", false); this.locationSubdir = locationSubdir; } } diff --git itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java index 749abb5..a4dd07e 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java +++ itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java @@ -29,7 +29,7 @@ public AccumuloQTestUtil(String outDir, String logDir, MiniClusterType miniMr, AccumuloTestSetup setup, String initScript, String cleanupScript) throws Exception { - super(outDir, logDir, miniMr, null, "0.20", initScript, cleanupScript, false, false); + super(outDir, logDir, miniMr, null, "0.20", initScript, cleanupScript, false); setup.setupWithHiveConf(conf); this.setup = setup; } diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java index c12f51e..72336e2 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java @@ -45,7 +45,7 @@ public static final String HIVE_ROOT = getHiveRoot(); public static enum MetastoreType { - sql, hbase + sql }; private MetastoreType metastoreType = MetastoreType.sql; @@ -413,8 +413,6 @@ protected void setMetastoreType(MetastoreType mt) { if (metaStoreTypeProperty != null) { if (metaStoreTypeProperty.equalsIgnoreCase("sql")) { metastoreType = MetastoreType.sql; - } else if (metaStoreTypeProperty.equalsIgnoreCase("hbase")) { - metastoreType = MetastoreType.hbase; } else { throw new IllegalArgumentException("Unknown metastore type: " + metaStoreTypeProperty); } diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java index 02abe53..67e03a4 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java @@ -55,11 +55,10 @@ public void beforeClass() { String hiveConfDir = cliConfig.getHiveConfDir(); String initScript = cliConfig.getInitScript(); String cleanupScript = cliConfig.getCleanupScript(); - boolean useHBaseMetastore = cliConfig.getMetastoreType() == MetastoreType.hbase; try { String hadoopVer = cliConfig.getHadoopVersion(); qt = new QTestUtil((cliConfig.getResultsDir()), (cliConfig.getLogDir()), miniMR, - hiveConfDir, hadoopVer, initScript, cleanupScript, useHBaseMetastore, 
true); + hiveConfDir, hadoopVer, initScript, cleanupScript, true); if (Strings.isNullOrEmpty(qt.getConf().get(HCONF_TEST_BLOBSTORE_PATH))) { fail(String.format("%s must be set. Try setting in blobstore-conf.xml", HCONF_TEST_BLOBSTORE_PATH)); diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java index d59b650..a1762ec 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java @@ -55,7 +55,6 @@ public void beforeClass() { final String hiveConfDir = cliConfig.getHiveConfDir(); final String initScript = cliConfig.getInitScript(); final String cleanupScript = cliConfig.getCleanupScript(); - final boolean useHBaseMetastore = cliConfig.getMetastoreType() == MetastoreType.hbase; try { final String hadoopVer = cliConfig.getHadoopVersion(); @@ -63,8 +62,7 @@ public void beforeClass() { @Override public QTestUtil invokeInternal() throws Exception { return new QTestUtil((cliConfig.getResultsDir()), (cliConfig.getLogDir()), miniMR, - hiveConfDir, hadoopVer, initScript, cleanupScript, useHBaseMetastore, true, - cliConfig.getFsType()); + hiveConfDir, hadoopVer, initScript, cleanupScript, true, cliConfig.getFsType()); } }.invoke("QtestUtil instance created", LOG, true); diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java index bff81dd..64b419b 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java @@ -52,7 +52,7 @@ public void beforeClass() { try { String hadoopVer = cliConfig.getHadoopVersion(); qt = new QTestUtil(cliConfig.getResultsDir(), cliConfig.getLogDir(), miniMR, - hiveConfDir, hadoopVer, initScript, cleanupScript, false, false); + hiveConfDir, hadoopVer, initScript, cleanupScript, false); // do a one time initialization qt.cleanUp(); diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java index 438a61e..7947988 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java @@ -45,7 +45,7 @@ public void beforeClass(){ try { String hadoopVer = cliConfig.getHadoopVersion(); qt = new QTestUtil((cliConfig.getResultsDir()), (cliConfig.getLogDir()), miniMR, - hiveConfDir, hadoopVer, initScript, cleanupScript, false, false); + hiveConfDir, hadoopVer, initScript, cleanupScript, false); // do a one time initialization qt.cleanUp(); qt.createSources(); diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java index 34eeb77..d80bd44 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java +++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java @@ -61,7 +61,7 @@ public void beforeClass() { String hadoopVer = cliConfig.getHadoopVersion(); qt = new QTestUtil(cliConfig.getResultsDir(), cliConfig.getLogDir(), miniMR, hiveConfDir, hadoopVer, initScript, - cleanupScript, false, 
false); + cleanupScript, false); // do a one time initialization qt.cleanUp(); diff --git itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java index 01faaba..aeb7215 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java +++ itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java @@ -46,7 +46,7 @@ public HBaseQTestUtil( String initScript, String cleanupScript) throws Exception { - super(outDir, logDir, miniMr, null, "0.20", initScript, cleanupScript, false, false); + super(outDir, logDir, miniMr, null, "0.20", initScript, cleanupScript, false); hbaseSetup = setup; hbaseSetup.preTest(conf); this.conn = setup.getConnection(); diff --git itests/util/src/main/java/org/apache/hadoop/hive/metastore/hbase/HBaseStoreTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/metastore/hbase/HBaseStoreTestUtil.java deleted file mode 100644 index 21e8f7e..0000000 --- itests/util/src/main/java/org/apache/hadoop/hive/metastore/hbase/HBaseStoreTestUtil.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hive.conf.HiveConf; - -import java.util.List; - -public class HBaseStoreTestUtil { - public static void initHBaseMetastore(HBaseAdmin admin, HiveConf conf) throws Exception { - for (String tableName : HBaseReadWrite.tableNames) { - List families = HBaseReadWrite.columnFamilies.get(tableName); - HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); - for (byte[] family : families) { - HColumnDescriptor columnDesc = new HColumnDescriptor(family); - desc.addFamily(columnDesc); - } - admin.createTable(desc); - } - admin.close(); - if (conf != null) { - HBaseReadWrite.setConf(conf); - } - } -} \ No newline at end of file diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index 535cfd9..825f826 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -103,7 +103,6 @@ import org.apache.hadoop.hive.llap.io.api.LlapProxy; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.hbase.HBaseStore; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -202,7 +201,6 @@ private final String initScript; private final String cleanupScript; - private boolean useHBaseMetastore = false; public interface SuiteAddTestFunctor { public void addTestToSuite(TestSuite suite, Object setup, String tName); @@ -347,14 +345,9 @@ public void initConf() throws Exception { conf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, true); } - if (!useHBaseMetastore) { - // Plug verifying metastore in for testing DirectSQL. - conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, - "org.apache.hadoop.hive.metastore.VerifyingObjectStore"); - } else { - conf.setVar(ConfVars.METASTORE_RAW_STORE_IMPL, HBaseStore.class.getName()); - conf.setBoolVar(ConfVars.METASTORE_FASTPATH, true); - } + // Plug verifying metastore in for testing DirectSQL. + conf.setVar(ConfVars.METASTORE_RAW_STORE_IMPL, + "org.apache.hadoop.hive.metastore.VerifyingObjectStore"); if (mr != null) { mr.setupConfiguration(conf); @@ -514,40 +507,22 @@ private String getKeyProviderURI() { return "jceks://file" + new Path(keyDir, "test.jks").toUri(); } - private void startMiniHBaseCluster() throws Exception { - Configuration hbaseConf = HBaseConfiguration.create(); - hbaseConf.setInt("hbase.master.info.port", -1); - utility = new HBaseTestingUtility(hbaseConf); - utility.startMiniCluster(); - conf = new HiveConf(utility.getConfiguration(), Driver.class); - HBaseAdmin admin = utility.getHBaseAdmin(); - // Need to use reflection here to make compilation pass since HBaseIntegrationTests - // is not compiled in hadoop-1. 
All HBaseMetastore tests run under hadoop-2, so this - // guarantee HBaseIntegrationTests exist when we hitting this code path - java.lang.reflect.Method initHBaseMetastoreMethod = Class.forName( - "org.apache.hadoop.hive.metastore.hbase.HBaseStoreTestUtil") - .getMethod("initHBaseMetastore", HBaseAdmin.class, HiveConf.class); - initHBaseMetastoreMethod.invoke(null, admin, conf); - conf.setVar(ConfVars.METASTORE_RAW_STORE_IMPL, HBaseStore.class.getName()); - conf.setBoolVar(ConfVars.METASTORE_FASTPATH, true); - } - public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, String confDir, String hadoopVer, String initScript, String cleanupScript, - boolean useHBaseMetastore, boolean withLlapIo) throws Exception { + boolean withLlapIo) throws Exception { this(outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript, - useHBaseMetastore, withLlapIo, null); + withLlapIo, null); } public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, String confDir, String hadzoopVer, String initScript, String cleanupScript, - boolean useHBaseMetastore, boolean withLlapIo, FsType fsType) + boolean withLlapIo, FsType fsType) throws Exception { LOG.info("Setting up QTestUtil with outDir={}, logDir={}, clusterType={}, confDir={}," + - " hadoopVer={}, initScript={}, cleanupScript={}, useHbaseMetaStore={}, withLlapIo={}," + + " hadoopVer={}, initScript={}, cleanupScript={}, withLlapIo={}," + " fsType={}" , outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript, - useHBaseMetastore, withLlapIo, fsType); + withLlapIo, fsType); Preconditions.checkNotNull(clusterType, "ClusterType cannot be null"); if (fsType != null) { this.fsType = fsType; @@ -556,7 +531,6 @@ public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, } this.outDir = outDir; this.logDir = logDir; - this.useHBaseMetastore = useHBaseMetastore; this.srcTables=getSrcTables(); this.srcUDFs = getSrcUDFs(); @@ -567,11 +541,7 @@ public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, } queryState = new QueryState.Builder().withHiveConf(new HiveConf(Driver.class)).build(); - if (useHBaseMetastore) { - startMiniHBaseCluster(); - } else { - conf = queryState.getConf(); - } + conf = queryState.getConf(); this.hadoopVer = getHadoopMainVersion(hadoopVer); qMap = new TreeMap(); qSkipSet = new HashSet(); @@ -697,9 +667,6 @@ public void shutdown() throws Exception { sparkSession = null; } } - if (useHBaseMetastore) { - utility.shutdownMiniCluster(); - } if (mr != null) { mr.shutdown(); mr = null; @@ -2032,8 +1999,7 @@ public void run() { for (int i = 0; i < qfiles.length; i++) { qt[i] = new QTestUtil(resDir, logDir, MiniClusterType.none, null, "0.20", initScript == null ? defaultInitScript : initScript, - cleanupScript == null ? defaultCleanupScript : cleanupScript, - false, false); + cleanupScript == null ? 
defaultCleanupScript : cleanupScript, false); qt[i].addFile(qfiles[i]); qt[i].clearTestSideEffects(); } diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/parse/CoreParseNegative.java itests/util/src/main/java/org/apache/hadoop/hive/ql/parse/CoreParseNegative.java index 31f69a3..30ac6d1 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/parse/CoreParseNegative.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/parse/CoreParseNegative.java @@ -54,8 +54,7 @@ public void beforeClass() { try { String hadoopVer = cliConfig.getHadoopVersion(); qt = new QTestUtil((cliConfig.getResultsDir()), (cliConfig.getLogDir()), miniMR, null, - hadoopVer, - initScript, cleanupScript, false, false); + hadoopVer, initScript, cleanupScript, false); } catch (Exception e) { System.err.println("Exception: " + e.getMessage()); e.printStackTrace(); diff --git metastore/pom.xml metastore/pom.xml index 733f891..999528a 100644 --- metastore/pom.xml +++ metastore/pom.xml @@ -332,11 +332,6 @@ - - - - - diff --git metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java deleted file mode 100644 index 9cf1ee2..0000000 --- metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java +++ /dev/null @@ -1,46709 +0,0 @@ -// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: hbase_metastore_proto.proto - -package org.apache.hadoop.hive.metastore.hbase; - -public final class HbaseMetastoreProto { - private HbaseMetastoreProto() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistry registry) { - } - /** - * Protobuf enum {@code org.apache.hadoop.hive.metastore.hbase.PrincipalType} - */ - public enum PrincipalType - implements com.google.protobuf.ProtocolMessageEnum { - /** - * USER = 0; - */ - USER(0, 0), - /** - * ROLE = 1; - */ - ROLE(1, 1), - ; - - /** - * USER = 0; - */ - public static final int USER_VALUE = 0; - /** - * ROLE = 1; - */ - public static final int ROLE_VALUE = 1; - - - public final int getNumber() { return value; } - - public static PrincipalType valueOf(int value) { - switch (value) { - case 0: return USER; - case 1: return ROLE; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public PrincipalType findValueByNumber(int number) { - return PrincipalType.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.getDescriptor().getEnumTypes().get(0); - } - - private static final PrincipalType[] VALUES = values(); - - public static PrincipalType valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int 
value; - - private PrincipalType(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalType) - } - - public interface AggrStatsOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required int64 parts_found = 1; - /** - * required int64 parts_found = 1; - */ - boolean hasPartsFound(); - /** - * required int64 parts_found = 1; - */ - long getPartsFound(); - - // repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - java.util.List - getColStatsList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getColStats(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - int getColStatsCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - java.util.List - getColStatsOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder getColStatsOrBuilder( - int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStats} - */ - public static final class AggrStats extends - com.google.protobuf.GeneratedMessage - implements AggrStatsOrBuilder { - // Use AggrStats.newBuilder() to construct. - private AggrStats(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private AggrStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final AggrStats defaultInstance; - public static AggrStats getDefaultInstance() { - return defaultInstance; - } - - public AggrStats getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private AggrStats( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - partsFound_ = input.readInt64(); - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - colStats_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - colStats_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if 
(((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - colStats_ = java.util.Collections.unmodifiableList(colStats_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public AggrStats parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new AggrStats(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required int64 parts_found = 1; - public static final int PARTS_FOUND_FIELD_NUMBER = 1; - private long partsFound_; - /** - * required int64 parts_found = 1; - */ - public boolean hasPartsFound() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required int64 parts_found = 1; - */ - public long getPartsFound() { - return partsFound_; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - public static final int COL_STATS_FIELD_NUMBER = 2; - private java.util.List colStats_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public java.util.List getColStatsList() { - return colStats_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public java.util.List - getColStatsOrBuilderList() { - return colStats_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public int getColStatsCount() { - return colStats_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getColStats(int index) { - return colStats_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder getColStatsOrBuilder( - int index) { - return colStats_.get(index); - } - - private void initFields() { - partsFound_ = 0L; - colStats_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasPartsFound()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getColStatsCount(); i++) { - if (!getColStats(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws 
java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, partsFound_); - } - for (int i = 0; i < colStats_.size(); i++) { - output.writeMessage(2, colStats_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, partsFound_); - } - for (int i = 0; i < colStats_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, colStats_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public 
static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStats} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getColStatsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - partsFound_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - if (colStatsBuilder_ == null) { - colStats_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - colStatsBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - 
result.partsFound_ = partsFound_; - if (colStatsBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - colStats_ = java.util.Collections.unmodifiableList(colStats_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.colStats_ = colStats_; - } else { - result.colStats_ = colStatsBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.getDefaultInstance()) return this; - if (other.hasPartsFound()) { - setPartsFound(other.getPartsFound()); - } - if (colStatsBuilder_ == null) { - if (!other.colStats_.isEmpty()) { - if (colStats_.isEmpty()) { - colStats_ = other.colStats_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureColStatsIsMutable(); - colStats_.addAll(other.colStats_); - } - onChanged(); - } - } else { - if (!other.colStats_.isEmpty()) { - if (colStatsBuilder_.isEmpty()) { - colStatsBuilder_.dispose(); - colStatsBuilder_ = null; - colStats_ = other.colStats_; - bitField0_ = (bitField0_ & ~0x00000002); - colStatsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getColStatsFieldBuilder() : null; - } else { - colStatsBuilder_.addAllMessages(other.colStats_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasPartsFound()) { - - return false; - } - for (int i = 0; i < getColStatsCount(); i++) { - if (!getColStats(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required int64 parts_found = 1; - private long partsFound_ ; - /** - * required int64 parts_found = 1; - */ - public boolean hasPartsFound() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required int64 parts_found = 1; - */ - public long getPartsFound() { - return partsFound_; - } - /** - * required int64 parts_found = 1; - */ - public Builder setPartsFound(long value) { - bitField0_ |= 0x00000001; - partsFound_ = value; - onChanged(); - return this; - } - /** - * required int64 parts_found = 1; - */ - public Builder clearPartsFound() { - bitField0_ = (bitField0_ & ~0x00000001); - partsFound_ = 0L; - onChanged(); - return this; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - private java.util.List colStats_ = - java.util.Collections.emptyList(); - private void ensureColStatsIsMutable() { - if (!((bitField0_ & 
0x00000002) == 0x00000002)) { - colStats_ = new java.util.ArrayList(colStats_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder> colStatsBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public java.util.List getColStatsList() { - if (colStatsBuilder_ == null) { - return java.util.Collections.unmodifiableList(colStats_); - } else { - return colStatsBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public int getColStatsCount() { - if (colStatsBuilder_ == null) { - return colStats_.size(); - } else { - return colStatsBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getColStats(int index) { - if (colStatsBuilder_ == null) { - return colStats_.get(index); - } else { - return colStatsBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public Builder setColStats( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats value) { - if (colStatsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColStatsIsMutable(); - colStats_.set(index, value); - onChanged(); - } else { - colStatsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public Builder setColStats( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder builderForValue) { - if (colStatsBuilder_ == null) { - ensureColStatsIsMutable(); - colStats_.set(index, builderForValue.build()); - onChanged(); - } else { - colStatsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public Builder addColStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats value) { - if (colStatsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColStatsIsMutable(); - colStats_.add(value); - onChanged(); - } else { - colStatsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public Builder addColStats( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats value) { - if (colStatsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColStatsIsMutable(); - colStats_.add(index, value); - onChanged(); - } else { - colStatsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public Builder addColStats( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder builderForValue) { - if (colStatsBuilder_ == null) { - ensureColStatsIsMutable(); - colStats_.add(builderForValue.build()); - onChanged(); - } else { - colStatsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * 
repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public Builder addColStats( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder builderForValue) { - if (colStatsBuilder_ == null) { - ensureColStatsIsMutable(); - colStats_.add(index, builderForValue.build()); - onChanged(); - } else { - colStatsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public Builder addAllColStats( - java.lang.Iterable values) { - if (colStatsBuilder_ == null) { - ensureColStatsIsMutable(); - super.addAll(values, colStats_); - onChanged(); - } else { - colStatsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public Builder clearColStats() { - if (colStatsBuilder_ == null) { - colStats_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - colStatsBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public Builder removeColStats(int index) { - if (colStatsBuilder_ == null) { - ensureColStatsIsMutable(); - colStats_.remove(index); - onChanged(); - } else { - colStatsBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder getColStatsBuilder( - int index) { - return getColStatsFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder getColStatsOrBuilder( - int index) { - if (colStatsBuilder_ == null) { - return colStats_.get(index); } else { - return colStatsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public java.util.List - getColStatsOrBuilderList() { - if (colStatsBuilder_ != null) { - return colStatsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(colStats_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder addColStatsBuilder() { - return getColStatsFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder addColStatsBuilder( - int index) { - return getColStatsFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; - */ - public java.util.List - getColStatsBuilderList() { - return getColStatsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder> - getColStatsFieldBuilder() { - if (colStatsBuilder_ == null) { - colStatsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder>( - colStats_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - colStats_ = null; - } - return colStatsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStats) - } - - static { - defaultInstance = new AggrStats(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStats) - } - - public interface AggrStatsBloomFilterOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bytes db_name = 1; - /** - * required bytes db_name = 1; - */ - boolean hasDbName(); - /** - * required bytes db_name = 1; - */ - com.google.protobuf.ByteString getDbName(); - - // required bytes table_name = 2; - /** - * required bytes table_name = 2; - */ - boolean hasTableName(); - /** - * required bytes table_name = 2; - */ - com.google.protobuf.ByteString getTableName(); - - // required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - boolean hasBloomFilter(); - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getBloomFilter(); - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder getBloomFilterOrBuilder(); - - // required int64 aggregated_at = 4; - /** - * required int64 aggregated_at = 4; - */ - boolean hasAggregatedAt(); - /** - * required int64 aggregated_at = 4; - */ - long getAggregatedAt(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter} - */ - public static final class AggrStatsBloomFilter extends - com.google.protobuf.GeneratedMessage - implements AggrStatsBloomFilterOrBuilder { - // Use AggrStatsBloomFilter.newBuilder() to construct. 
- private AggrStatsBloomFilter(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private AggrStatsBloomFilter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final AggrStatsBloomFilter defaultInstance; - public static AggrStatsBloomFilter getDefaultInstance() { - return defaultInstance; - } - - public AggrStatsBloomFilter getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private AggrStatsBloomFilter( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - dbName_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - tableName_ = input.readBytes(); - break; - } - case 26: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder subBuilder = null; - if (((bitField0_ & 0x00000004) == 0x00000004)) { - subBuilder = bloomFilter_.toBuilder(); - } - bloomFilter_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(bloomFilter_); - bloomFilter_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000004; - break; - } - case 32: { - bitField0_ |= 0x00000008; - aggregatedAt_ = input.readInt64(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public AggrStatsBloomFilter parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return new AggrStatsBloomFilter(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface BloomFilterOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required int32 num_bits = 1; - /** - * required int32 num_bits = 1; - */ - boolean hasNumBits(); - /** - * required int32 num_bits = 1; - */ - int getNumBits(); - - // required int32 num_funcs = 2; - /** - * required int32 num_funcs = 2; - */ - boolean hasNumFuncs(); - /** - * required int32 num_funcs = 2; - */ - int getNumFuncs(); - - // repeated int64 bits = 3; - /** - * repeated int64 bits = 3; - */ - java.util.List getBitsList(); - /** - * repeated int64 bits = 3; - */ - int getBitsCount(); - /** - * repeated int64 bits = 3; - */ - long getBits(int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter} - */ - public static final class BloomFilter extends - com.google.protobuf.GeneratedMessage - implements BloomFilterOrBuilder { - // Use BloomFilter.newBuilder() to construct. - private BloomFilter(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private BloomFilter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final BloomFilter defaultInstance; - public static BloomFilter getDefaultInstance() { - return defaultInstance; - } - - public BloomFilter getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private BloomFilter( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - numBits_ = input.readInt32(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - numFuncs_ = input.readInt32(); - break; - } - case 24: { - if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - bits_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000004; - } - bits_.add(input.readInt64()); - break; - } - case 26: { - int length = input.readRawVarint32(); - int limit = input.pushLimit(length); - if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) { - bits_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000004; - } - while (input.getBytesUntilLimit() > 0) { - bits_.add(input.readInt64()); - } - input.popLimit(limit); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - bits_ 
= java.util.Collections.unmodifiableList(bits_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BloomFilter parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new BloomFilter(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required int32 num_bits = 1; - public static final int NUM_BITS_FIELD_NUMBER = 1; - private int numBits_; - /** - * required int32 num_bits = 1; - */ - public boolean hasNumBits() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required int32 num_bits = 1; - */ - public int getNumBits() { - return numBits_; - } - - // required int32 num_funcs = 2; - public static final int NUM_FUNCS_FIELD_NUMBER = 2; - private int numFuncs_; - /** - * required int32 num_funcs = 2; - */ - public boolean hasNumFuncs() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required int32 num_funcs = 2; - */ - public int getNumFuncs() { - return numFuncs_; - } - - // repeated int64 bits = 3; - public static final int BITS_FIELD_NUMBER = 3; - private java.util.List bits_; - /** - * repeated int64 bits = 3; - */ - public java.util.List - getBitsList() { - return bits_; - } - /** - * repeated int64 bits = 3; - */ - public int getBitsCount() { - return bits_.size(); - } - /** - * repeated int64 bits = 3; - */ - public long getBits(int index) { - return bits_.get(index); - } - - private void initFields() { - numBits_ = 0; - numFuncs_ = 0; - bits_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasNumBits()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasNumFuncs()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt32(1, numBits_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeInt32(2, numFuncs_); - } - for (int i = 0; i < bits_.size(); i++) { - output.writeInt64(3, bits_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = 
memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(1, numBits_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(2, numFuncs_); - } - { - int dataSize = 0; - for (int i = 0; i < bits_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeInt64SizeNoTag(bits_.get(i)); - } - size += dataSize; - size += 1 * getBitsList().size(); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - numBits_ = 0; - bitField0_ = (bitField0_ & ~0x00000001); - numFuncs_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - bits_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.numBits_ = numBits_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.numFuncs_ = numFuncs_; - if (((bitField0_ & 0x00000004) == 0x00000004)) { - bits_ = java.util.Collections.unmodifiableList(bits_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.bits_ = bits_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance()) return this; - if (other.hasNumBits()) { - setNumBits(other.getNumBits()); - } - if (other.hasNumFuncs()) { - setNumFuncs(other.getNumFuncs()); - } - if (!other.bits_.isEmpty()) { - if (bits_.isEmpty()) { - bits_ = other.bits_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureBitsIsMutable(); - bits_.addAll(other.bits_); - } - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasNumBits()) { - - return false; - } - if (!hasNumFuncs()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required int32 num_bits = 1; - private int numBits_ ; - /** - * required int32 num_bits = 1; - */ - public boolean hasNumBits() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required int32 num_bits = 1; - */ - public int getNumBits() { - return numBits_; - } - /** - * required int32 num_bits = 1; - */ - public Builder setNumBits(int value) { - bitField0_ |= 0x00000001; - numBits_ = value; - onChanged(); - return this; - } - /** - * required int32 num_bits = 1; - */ - public Builder clearNumBits() { - bitField0_ = (bitField0_ & ~0x00000001); - numBits_ = 0; - onChanged(); - return this; - } - - // required int32 num_funcs = 2; - private int numFuncs_ ; - /** - * required int32 num_funcs = 2; - */ - public boolean hasNumFuncs() { - 
return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required int32 num_funcs = 2; - */ - public int getNumFuncs() { - return numFuncs_; - } - /** - * required int32 num_funcs = 2; - */ - public Builder setNumFuncs(int value) { - bitField0_ |= 0x00000002; - numFuncs_ = value; - onChanged(); - return this; - } - /** - * required int32 num_funcs = 2; - */ - public Builder clearNumFuncs() { - bitField0_ = (bitField0_ & ~0x00000002); - numFuncs_ = 0; - onChanged(); - return this; - } - - // repeated int64 bits = 3; - private java.util.List bits_ = java.util.Collections.emptyList(); - private void ensureBitsIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - bits_ = new java.util.ArrayList(bits_); - bitField0_ |= 0x00000004; - } - } - /** - * repeated int64 bits = 3; - */ - public java.util.List - getBitsList() { - return java.util.Collections.unmodifiableList(bits_); - } - /** - * repeated int64 bits = 3; - */ - public int getBitsCount() { - return bits_.size(); - } - /** - * repeated int64 bits = 3; - */ - public long getBits(int index) { - return bits_.get(index); - } - /** - * repeated int64 bits = 3; - */ - public Builder setBits( - int index, long value) { - ensureBitsIsMutable(); - bits_.set(index, value); - onChanged(); - return this; - } - /** - * repeated int64 bits = 3; - */ - public Builder addBits(long value) { - ensureBitsIsMutable(); - bits_.add(value); - onChanged(); - return this; - } - /** - * repeated int64 bits = 3; - */ - public Builder addAllBits( - java.lang.Iterable values) { - ensureBitsIsMutable(); - super.addAll(values, bits_); - onChanged(); - return this; - } - /** - * repeated int64 bits = 3; - */ - public Builder clearBits() { - bits_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter) - } - - static { - defaultInstance = new BloomFilter(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter) - } - - private int bitField0_; - // required bytes db_name = 1; - public static final int DB_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString dbName_; - /** - * required bytes db_name = 1; - */ - public boolean hasDbName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bytes db_name = 1; - */ - public com.google.protobuf.ByteString getDbName() { - return dbName_; - } - - // required bytes table_name = 2; - public static final int TABLE_NAME_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString tableName_; - /** - * required bytes table_name = 2; - */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required bytes table_name = 2; - */ - public com.google.protobuf.ByteString getTableName() { - return tableName_; - } - - // required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - public static final int BLOOM_FILTER_FIELD_NUMBER = 3; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter bloomFilter_; - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - public boolean hasBloomFilter() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required 
.org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getBloomFilter() { - return bloomFilter_; - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder getBloomFilterOrBuilder() { - return bloomFilter_; - } - - // required int64 aggregated_at = 4; - public static final int AGGREGATED_AT_FIELD_NUMBER = 4; - private long aggregatedAt_; - /** - * required int64 aggregated_at = 4; - */ - public boolean hasAggregatedAt() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * required int64 aggregated_at = 4; - */ - public long getAggregatedAt() { - return aggregatedAt_; - } - - private void initFields() { - dbName_ = com.google.protobuf.ByteString.EMPTY; - tableName_ = com.google.protobuf.ByteString.EMPTY; - bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); - aggregatedAt_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasDbName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasTableName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBloomFilter()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasAggregatedAt()) { - memoizedIsInitialized = 0; - return false; - } - if (!getBloomFilter().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, dbName_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, tableName_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeMessage(3, bloomFilter_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeInt64(4, aggregatedAt_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, dbName_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, tableName_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, bloomFilter_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(4, aggregatedAt_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( - com.google.protobuf.ByteString data) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilterOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBloomFilterFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - dbName_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - if (bloomFilterBuilder_ == null) { - bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); - } else { - bloomFilterBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - aggregatedAt_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.dbName_ = dbName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.tableName_ = tableName_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - if (bloomFilterBuilder_ == null) { - result.bloomFilter_ = bloomFilter_; - } else { - result.bloomFilter_ = bloomFilterBuilder_.build(); - } - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - 
} - result.aggregatedAt_ = aggregatedAt_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.getDefaultInstance()) return this; - if (other.hasDbName()) { - setDbName(other.getDbName()); - } - if (other.hasTableName()) { - setTableName(other.getTableName()); - } - if (other.hasBloomFilter()) { - mergeBloomFilter(other.getBloomFilter()); - } - if (other.hasAggregatedAt()) { - setAggregatedAt(other.getAggregatedAt()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasDbName()) { - - return false; - } - if (!hasTableName()) { - - return false; - } - if (!hasBloomFilter()) { - - return false; - } - if (!hasAggregatedAt()) { - - return false; - } - if (!getBloomFilter().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required bytes db_name = 1; - private com.google.protobuf.ByteString dbName_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes db_name = 1; - */ - public boolean hasDbName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bytes db_name = 1; - */ - public com.google.protobuf.ByteString getDbName() { - return dbName_; - } - /** - * required bytes db_name = 1; - */ - public Builder setDbName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - dbName_ = value; - onChanged(); - return this; - } - /** - * required bytes db_name = 1; - */ - public Builder clearDbName() { - bitField0_ = (bitField0_ & ~0x00000001); - dbName_ = getDefaultInstance().getDbName(); - onChanged(); - return this; - } - - // required bytes table_name = 2; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes table_name = 2; - */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required bytes table_name = 2; - */ - public com.google.protobuf.ByteString getTableName() { - return tableName_; - } - /** - * required bytes table_name = 2; - */ - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - tableName_ = value; - onChanged(); - return this; - } - 
/** - * required bytes table_name = 2; - */ - public Builder clearTableName() { - bitField0_ = (bitField0_ & ~0x00000002); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); - return this; - } - - // required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder> bloomFilterBuilder_; - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - public boolean hasBloomFilter() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getBloomFilter() { - if (bloomFilterBuilder_ == null) { - return bloomFilter_; - } else { - return bloomFilterBuilder_.getMessage(); - } - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - public Builder setBloomFilter(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter value) { - if (bloomFilterBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - bloomFilter_ = value; - onChanged(); - } else { - bloomFilterBuilder_.setMessage(value); - } - bitField0_ |= 0x00000004; - return this; - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - public Builder setBloomFilter( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder builderForValue) { - if (bloomFilterBuilder_ == null) { - bloomFilter_ = builderForValue.build(); - onChanged(); - } else { - bloomFilterBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000004; - return this; - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - public Builder mergeBloomFilter(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter value) { - if (bloomFilterBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004) && - bloomFilter_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance()) { - bloomFilter_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.newBuilder(bloomFilter_).mergeFrom(value).buildPartial(); - } else { - bloomFilter_ = value; - } - onChanged(); - } else { - bloomFilterBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000004; - return this; - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - public Builder clearBloomFilter() { - if (bloomFilterBuilder_ == null) { - bloomFilter_ = 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); - onChanged(); - } else { - bloomFilterBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder getBloomFilterBuilder() { - bitField0_ |= 0x00000004; - onChanged(); - return getBloomFilterFieldBuilder().getBuilder(); - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder getBloomFilterOrBuilder() { - if (bloomFilterBuilder_ != null) { - return bloomFilterBuilder_.getMessageOrBuilder(); - } else { - return bloomFilter_; - } - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder> - getBloomFilterFieldBuilder() { - if (bloomFilterBuilder_ == null) { - bloomFilterBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder>( - bloomFilter_, - getParentForChildren(), - isClean()); - bloomFilter_ = null; - } - return bloomFilterBuilder_; - } - - // required int64 aggregated_at = 4; - private long aggregatedAt_ ; - /** - * required int64 aggregated_at = 4; - */ - public boolean hasAggregatedAt() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * required int64 aggregated_at = 4; - */ - public long getAggregatedAt() { - return aggregatedAt_; - } - /** - * required int64 aggregated_at = 4; - */ - public Builder setAggregatedAt(long value) { - bitField0_ |= 0x00000008; - aggregatedAt_ = value; - onChanged(); - return this; - } - /** - * required int64 aggregated_at = 4; - */ - public Builder clearAggregatedAt() { - bitField0_ = (bitField0_ & ~0x00000008); - aggregatedAt_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter) - } - - static { - defaultInstance = new AggrStatsBloomFilter(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter) - } - - public interface AggrStatsInvalidatorFilterOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - java.util.List - getToInvalidateList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry getToInvalidate(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - int getToInvalidateCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - java.util.List - getToInvalidateOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder getToInvalidateOrBuilder( - int index); - - // required int64 run_every = 2; - /** - * required int64 run_every = 2; - */ - boolean hasRunEvery(); - /** - * required int64 run_every = 2; - */ - long getRunEvery(); - - // required int64 max_cache_entry_life = 3; - /** - * required int64 max_cache_entry_life = 3; - */ - boolean hasMaxCacheEntryLife(); - /** - * required int64 max_cache_entry_life = 3; - */ - long getMaxCacheEntryLife(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter} - */ - public static final class AggrStatsInvalidatorFilter extends - com.google.protobuf.GeneratedMessage - implements AggrStatsInvalidatorFilterOrBuilder { - // Use AggrStatsInvalidatorFilter.newBuilder() to construct. - private AggrStatsInvalidatorFilter(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private AggrStatsInvalidatorFilter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final AggrStatsInvalidatorFilter defaultInstance; - public static AggrStatsInvalidatorFilter getDefaultInstance() { - return defaultInstance; - } - - public AggrStatsInvalidatorFilter getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private AggrStatsInvalidatorFilter( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - toInvalidate_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - toInvalidate_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.PARSER, extensionRegistry)); - break; - } - case 16: { - bitField0_ |= 0x00000001; - runEvery_ = input.readInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000002; - maxCacheEntryLife_ = input.readInt64(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - 
e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - toInvalidate_ = java.util.Collections.unmodifiableList(toInvalidate_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public AggrStatsInvalidatorFilter parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new AggrStatsInvalidatorFilter(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface EntryOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bytes db_name = 1; - /** - * required bytes db_name = 1; - */ - boolean hasDbName(); - /** - * required bytes db_name = 1; - */ - com.google.protobuf.ByteString getDbName(); - - // required bytes table_name = 2; - /** - * required bytes table_name = 2; - */ - boolean hasTableName(); - /** - * required bytes table_name = 2; - */ - com.google.protobuf.ByteString getTableName(); - - // required bytes part_name = 3; - /** - * required bytes part_name = 3; - */ - boolean hasPartName(); - /** - * required bytes part_name = 3; - */ - com.google.protobuf.ByteString getPartName(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry} - */ - public static final class Entry extends - com.google.protobuf.GeneratedMessage - implements EntryOrBuilder { - // Use Entry.newBuilder() to construct. 
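For orientation: the EntryOrBuilder interface deleted just above defines AggrStatsInvalidatorFilter.Entry as three required byte fields (db_name = 1, table_name = 2, part_name = 3), which appears to be just enough to name one partition whose cached aggregate statistics should be dropped; that reading is inferred from the field and message names, not stated in the file. A small sketch of constructing an Entry with the deleted builder API follows (newBuilder, setDbName, setTableName, setPartName and build all appear later in this hunk); the wrapper class EntrySketch and the literal values are made up for illustration.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry;

    public class EntrySketch {
      // Names one partition (database/table/partition-name) whose cached stats should be invalidated.
      static Entry example() {
        return Entry.newBuilder()
            .setDbName(ByteString.copyFromUtf8("default"))          // required bytes db_name = 1
            .setTableName(ByteString.copyFromUtf8("web_logs"))      // required bytes table_name = 2
            .setPartName(ByteString.copyFromUtf8("ds=2016-01-01"))  // required bytes part_name = 3
            .build();
      }
    }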
- private Entry(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Entry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Entry defaultInstance; - public static Entry getDefaultInstance() { - return defaultInstance; - } - - public Entry getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Entry( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - dbName_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - tableName_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - partName_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Entry parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Entry(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required bytes db_name = 1; - public static final int DB_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString dbName_; - /** - * required bytes db_name = 1; - */ - public boolean hasDbName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bytes db_name = 1; - */ - public com.google.protobuf.ByteString getDbName() { - return dbName_; - } - - // required bytes 
table_name = 2; - public static final int TABLE_NAME_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString tableName_; - /** - * required bytes table_name = 2; - */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required bytes table_name = 2; - */ - public com.google.protobuf.ByteString getTableName() { - return tableName_; - } - - // required bytes part_name = 3; - public static final int PART_NAME_FIELD_NUMBER = 3; - private com.google.protobuf.ByteString partName_; - /** - * required bytes part_name = 3; - */ - public boolean hasPartName() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required bytes part_name = 3; - */ - public com.google.protobuf.ByteString getPartName() { - return partName_; - } - - private void initFields() { - dbName_ = com.google.protobuf.ByteString.EMPTY; - tableName_ = com.google.protobuf.ByteString.EMPTY; - partName_ = com.google.protobuf.ByteString.EMPTY; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasDbName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasTableName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasPartName()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, dbName_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, tableName_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, partName_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, dbName_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, tableName_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, partName_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return 
PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - dbName_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - partName_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.dbName_ = dbName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.tableName_ = tableName_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.partName_ = partName_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.getDefaultInstance()) return this; - if (other.hasDbName()) { - setDbName(other.getDbName()); - } - if (other.hasTableName()) { - setTableName(other.getTableName()); - } - if (other.hasPartName()) { - 
setPartName(other.getPartName()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasDbName()) { - - return false; - } - if (!hasTableName()) { - - return false; - } - if (!hasPartName()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required bytes db_name = 1; - private com.google.protobuf.ByteString dbName_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes db_name = 1; - */ - public boolean hasDbName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bytes db_name = 1; - */ - public com.google.protobuf.ByteString getDbName() { - return dbName_; - } - /** - * required bytes db_name = 1; - */ - public Builder setDbName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - dbName_ = value; - onChanged(); - return this; - } - /** - * required bytes db_name = 1; - */ - public Builder clearDbName() { - bitField0_ = (bitField0_ & ~0x00000001); - dbName_ = getDefaultInstance().getDbName(); - onChanged(); - return this; - } - - // required bytes table_name = 2; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes table_name = 2; - */ - public boolean hasTableName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required bytes table_name = 2; - */ - public com.google.protobuf.ByteString getTableName() { - return tableName_; - } - /** - * required bytes table_name = 2; - */ - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - tableName_ = value; - onChanged(); - return this; - } - /** - * required bytes table_name = 2; - */ - public Builder clearTableName() { - bitField0_ = (bitField0_ & ~0x00000002); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); - return this; - } - - // required bytes part_name = 3; - private com.google.protobuf.ByteString partName_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes part_name = 3; - */ - public boolean hasPartName() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required bytes part_name = 3; - */ - public com.google.protobuf.ByteString getPartName() { - return partName_; - } - /** - * required bytes part_name = 3; - */ - public Builder setPartName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - partName_ = value; - onChanged(); - return this; - } - /** - * required bytes part_name = 3; - */ - public Builder clearPartName() { - bitField0_ = (bitField0_ & ~0x00000004); - partName_ = getDefaultInstance().getPartName(); - onChanged(); - return 
this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry) - } - - static { - defaultInstance = new Entry(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry) - } - - private int bitField0_; - // repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - public static final int TO_INVALIDATE_FIELD_NUMBER = 1; - private java.util.List toInvalidate_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public java.util.List getToInvalidateList() { - return toInvalidate_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public java.util.List - getToInvalidateOrBuilderList() { - return toInvalidate_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public int getToInvalidateCount() { - return toInvalidate_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry getToInvalidate(int index) { - return toInvalidate_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder getToInvalidateOrBuilder( - int index) { - return toInvalidate_.get(index); - } - - // required int64 run_every = 2; - public static final int RUN_EVERY_FIELD_NUMBER = 2; - private long runEvery_; - /** - * required int64 run_every = 2; - */ - public boolean hasRunEvery() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required int64 run_every = 2; - */ - public long getRunEvery() { - return runEvery_; - } - - // required int64 max_cache_entry_life = 3; - public static final int MAX_CACHE_ENTRY_LIFE_FIELD_NUMBER = 3; - private long maxCacheEntryLife_; - /** - * required int64 max_cache_entry_life = 3; - */ - public boolean hasMaxCacheEntryLife() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required int64 max_cache_entry_life = 3; - */ - public long getMaxCacheEntryLife() { - return maxCacheEntryLife_; - } - - private void initFields() { - toInvalidate_ = java.util.Collections.emptyList(); - runEvery_ = 0L; - maxCacheEntryLife_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasRunEvery()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasMaxCacheEntryLife()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getToInvalidateCount(); i++) { - if (!getToInvalidate(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < toInvalidate_.size(); i++) { - output.writeMessage(1, toInvalidate_.get(i)); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(2, runEvery_); - } - if (((bitField0_ & 0x00000002) == 
0x00000002)) { - output.writeInt64(3, maxCacheEntryLife_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < toInvalidate_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, toInvalidate_.get(i)); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(2, runEvery_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(3, maxCacheEntryLife_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( - com.google.protobuf.CodedInputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilterOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getToInvalidateFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (toInvalidateBuilder_ == null) { - toInvalidate_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - toInvalidateBuilder_.clear(); - } - runEvery_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - maxCacheEntryLife_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter result = buildPartial(); - if (!result.isInitialized()) { - throw 
newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (toInvalidateBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - toInvalidate_ = java.util.Collections.unmodifiableList(toInvalidate_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.toInvalidate_ = toInvalidate_; - } else { - result.toInvalidate_ = toInvalidateBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000001; - } - result.runEvery_ = runEvery_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000002; - } - result.maxCacheEntryLife_ = maxCacheEntryLife_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.getDefaultInstance()) return this; - if (toInvalidateBuilder_ == null) { - if (!other.toInvalidate_.isEmpty()) { - if (toInvalidate_.isEmpty()) { - toInvalidate_ = other.toInvalidate_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureToInvalidateIsMutable(); - toInvalidate_.addAll(other.toInvalidate_); - } - onChanged(); - } - } else { - if (!other.toInvalidate_.isEmpty()) { - if (toInvalidateBuilder_.isEmpty()) { - toInvalidateBuilder_.dispose(); - toInvalidateBuilder_ = null; - toInvalidate_ = other.toInvalidate_; - bitField0_ = (bitField0_ & ~0x00000001); - toInvalidateBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getToInvalidateFieldBuilder() : null; - } else { - toInvalidateBuilder_.addAllMessages(other.toInvalidate_); - } - } - } - if (other.hasRunEvery()) { - setRunEvery(other.getRunEvery()); - } - if (other.hasMaxCacheEntryLife()) { - setMaxCacheEntryLife(other.getMaxCacheEntryLife()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRunEvery()) { - - return false; - } - if (!hasMaxCacheEntryLife()) { - - return false; - } - for (int i = 0; i < getToInvalidateCount(); i++) { - if (!getToInvalidate(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - private java.util.List toInvalidate_ = - java.util.Collections.emptyList(); - private void ensureToInvalidateIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - toInvalidate_ = new java.util.ArrayList(toInvalidate_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder> toInvalidateBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public java.util.List getToInvalidateList() { - if (toInvalidateBuilder_ == null) { - return java.util.Collections.unmodifiableList(toInvalidate_); - } else { - return toInvalidateBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public int getToInvalidateCount() { - if (toInvalidateBuilder_ == null) { - return toInvalidate_.size(); - } else { - return toInvalidateBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry getToInvalidate(int index) { - if (toInvalidateBuilder_ == null) { - return toInvalidate_.get(index); - } else { - return toInvalidateBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public Builder setToInvalidate( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry value) { - if (toInvalidateBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureToInvalidateIsMutable(); - toInvalidate_.set(index, value); - onChanged(); - } 
else { - toInvalidateBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public Builder setToInvalidate( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder builderForValue) { - if (toInvalidateBuilder_ == null) { - ensureToInvalidateIsMutable(); - toInvalidate_.set(index, builderForValue.build()); - onChanged(); - } else { - toInvalidateBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public Builder addToInvalidate(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry value) { - if (toInvalidateBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureToInvalidateIsMutable(); - toInvalidate_.add(value); - onChanged(); - } else { - toInvalidateBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public Builder addToInvalidate( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry value) { - if (toInvalidateBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureToInvalidateIsMutable(); - toInvalidate_.add(index, value); - onChanged(); - } else { - toInvalidateBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public Builder addToInvalidate( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder builderForValue) { - if (toInvalidateBuilder_ == null) { - ensureToInvalidateIsMutable(); - toInvalidate_.add(builderForValue.build()); - onChanged(); - } else { - toInvalidateBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public Builder addToInvalidate( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder builderForValue) { - if (toInvalidateBuilder_ == null) { - ensureToInvalidateIsMutable(); - toInvalidate_.add(index, builderForValue.build()); - onChanged(); - } else { - toInvalidateBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public Builder addAllToInvalidate( - java.lang.Iterable values) { - if (toInvalidateBuilder_ == null) { - ensureToInvalidateIsMutable(); - super.addAll(values, toInvalidate_); - onChanged(); - } else { - toInvalidateBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public Builder clearToInvalidate() { - if (toInvalidateBuilder_ == null) { - toInvalidate_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - toInvalidateBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public Builder 
removeToInvalidate(int index) { - if (toInvalidateBuilder_ == null) { - ensureToInvalidateIsMutable(); - toInvalidate_.remove(index); - onChanged(); - } else { - toInvalidateBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder getToInvalidateBuilder( - int index) { - return getToInvalidateFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder getToInvalidateOrBuilder( - int index) { - if (toInvalidateBuilder_ == null) { - return toInvalidate_.get(index); } else { - return toInvalidateBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public java.util.List - getToInvalidateOrBuilderList() { - if (toInvalidateBuilder_ != null) { - return toInvalidateBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(toInvalidate_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder addToInvalidateBuilder() { - return getToInvalidateFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder addToInvalidateBuilder( - int index) { - return getToInvalidateFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; - */ - public java.util.List - getToInvalidateBuilderList() { - return getToInvalidateFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder> - getToInvalidateFieldBuilder() { - if (toInvalidateBuilder_ == null) { - toInvalidateBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder>( - toInvalidate_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - toInvalidate_ = null; - } - return toInvalidateBuilder_; - } - - // required int64 run_every = 2; - private long runEvery_ ; - /** - * required int64 run_every = 2; - */ - public boolean hasRunEvery() { - return ((bitField0_ & 0x00000002) == 
0x00000002); - } - /** - * required int64 run_every = 2; - */ - public long getRunEvery() { - return runEvery_; - } - /** - * required int64 run_every = 2; - */ - public Builder setRunEvery(long value) { - bitField0_ |= 0x00000002; - runEvery_ = value; - onChanged(); - return this; - } - /** - * required int64 run_every = 2; - */ - public Builder clearRunEvery() { - bitField0_ = (bitField0_ & ~0x00000002); - runEvery_ = 0L; - onChanged(); - return this; - } - - // required int64 max_cache_entry_life = 3; - private long maxCacheEntryLife_ ; - /** - * required int64 max_cache_entry_life = 3; - */ - public boolean hasMaxCacheEntryLife() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required int64 max_cache_entry_life = 3; - */ - public long getMaxCacheEntryLife() { - return maxCacheEntryLife_; - } - /** - * required int64 max_cache_entry_life = 3; - */ - public Builder setMaxCacheEntryLife(long value) { - bitField0_ |= 0x00000004; - maxCacheEntryLife_ = value; - onChanged(); - return this; - } - /** - * required int64 max_cache_entry_life = 3; - */ - public Builder clearMaxCacheEntryLife() { - bitField0_ = (bitField0_ & ~0x00000004); - maxCacheEntryLife_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter) - } - - static { - defaultInstance = new AggrStatsInvalidatorFilter(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter) - } - - public interface ColumnStatsOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional int64 last_analyzed = 1; - /** - * optional int64 last_analyzed = 1; - */ - boolean hasLastAnalyzed(); - /** - * optional int64 last_analyzed = 1; - */ - long getLastAnalyzed(); - - // required string column_type = 2; - /** - * required string column_type = 2; - */ - boolean hasColumnType(); - /** - * required string column_type = 2; - */ - java.lang.String getColumnType(); - /** - * required string column_type = 2; - */ - com.google.protobuf.ByteString - getColumnTypeBytes(); - - // optional int64 num_nulls = 3; - /** - * optional int64 num_nulls = 3; - */ - boolean hasNumNulls(); - /** - * optional int64 num_nulls = 3; - */ - long getNumNulls(); - - // optional int64 num_distinct_values = 4; - /** - * optional int64 num_distinct_values = 4; - */ - boolean hasNumDistinctValues(); - /** - * optional int64 num_distinct_values = 4; - */ - long getNumDistinctValues(); - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - boolean hasBoolStats(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats getBoolStats(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder getBoolStatsOrBuilder(); - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - boolean hasLongStats(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - 
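That completes the removal of AggrStatsInvalidatorFilter itself: a repeated to_invalidate list of the Entry values sketched earlier, plus two required int64 fields, run_every = 2 and max_cache_entry_life = 3 (their units are not stated in the schema; milliseconds are assumed below purely for illustration). A hedged sketch of assembling the filter with the deleted builder API (addToInvalidate, setRunEvery, setMaxCacheEntryLife and build are all visible in the hunk above); the class name InvalidatorFilterSketch and the concrete time values are invented.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter;

    public class InvalidatorFilterSketch {
      // Wraps one or more Entry values together with the invalidator's timing settings.
      static AggrStatsInvalidatorFilter example(AggrStatsInvalidatorFilter.Entry entry) {
        return AggrStatsInvalidatorFilter.newBuilder()
            .addToInvalidate(entry)                            // repeated Entry to_invalidate = 1
            .setRunEvery(TimeUnit.MINUTES.toMillis(5))         // required int64 run_every = 2
            .setMaxCacheEntryLife(TimeUnit.HOURS.toMillis(24)) // required int64 max_cache_entry_life = 3
            .build();
      }
    }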
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats getLongStats(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder getLongStatsOrBuilder(); - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - boolean hasDoubleStats(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats getDoubleStats(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder getDoubleStatsOrBuilder(); - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - boolean hasStringStats(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getStringStats(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getStringStatsOrBuilder(); - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - boolean hasBinaryStats(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getBinaryStats(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getBinaryStatsOrBuilder(); - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - boolean hasDecimalStats(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats getDecimalStats(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder getDecimalStatsOrBuilder(); - - // optional string column_name = 11; - /** - * optional string column_name = 11; - */ - boolean hasColumnName(); - /** - * optional string column_name = 11; - */ - java.lang.String getColumnName(); - /** - * optional string column_name = 11; - */ - com.google.protobuf.ByteString - getColumnNameBytes(); - - // optional string bit_vectors = 12; - /** - * optional string bit_vectors = 12; - */ - boolean hasBitVectors(); - /** - * optional string bit_vectors = 12; - */ - java.lang.String getBitVectors(); - /** - * optional string bit_vectors = 
12; - */ - com.google.protobuf.ByteString - getBitVectorsBytes(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats} - */ - public static final class ColumnStats extends - com.google.protobuf.GeneratedMessage - implements ColumnStatsOrBuilder { - // Use ColumnStats.newBuilder() to construct. - private ColumnStats(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ColumnStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ColumnStats defaultInstance; - public static ColumnStats getDefaultInstance() { - return defaultInstance; - } - - public ColumnStats getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ColumnStats( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - lastAnalyzed_ = input.readInt64(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - columnType_ = input.readBytes(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - numNulls_ = input.readInt64(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - numDistinctValues_ = input.readInt64(); - break; - } - case 42: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder subBuilder = null; - if (((bitField0_ & 0x00000010) == 0x00000010)) { - subBuilder = boolStats_.toBuilder(); - } - boolStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(boolStats_); - boolStats_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000010; - break; - } - case 50: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder subBuilder = null; - if (((bitField0_ & 0x00000020) == 0x00000020)) { - subBuilder = longStats_.toBuilder(); - } - longStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(longStats_); - longStats_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000020; - break; - } - case 58: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder subBuilder = null; - if (((bitField0_ & 0x00000040) == 0x00000040)) { - subBuilder = doubleStats_.toBuilder(); - } - doubleStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(doubleStats_); - doubleStats_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000040; - break; - } - case 66: { - 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder subBuilder = null; - if (((bitField0_ & 0x00000080) == 0x00000080)) { - subBuilder = stringStats_.toBuilder(); - } - stringStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(stringStats_); - stringStats_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000080; - break; - } - case 74: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder subBuilder = null; - if (((bitField0_ & 0x00000100) == 0x00000100)) { - subBuilder = binaryStats_.toBuilder(); - } - binaryStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(binaryStats_); - binaryStats_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000100; - break; - } - case 82: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder subBuilder = null; - if (((bitField0_ & 0x00000200) == 0x00000200)) { - subBuilder = decimalStats_.toBuilder(); - } - decimalStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(decimalStats_); - decimalStats_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000200; - break; - } - case 90: { - bitField0_ |= 0x00000400; - columnName_ = input.readBytes(); - break; - } - case 98: { - bitField0_ |= 0x00000800; - bitVectors_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ColumnStats parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ColumnStats(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface BooleanStatsOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional int64 num_trues = 1; - /** - * optional int64 num_trues = 1; - */ - boolean hasNumTrues(); - /** - * optional int64 num_trues = 1; - */ - long getNumTrues(); - - // optional int64 
num_falses = 2; - /** - * optional int64 num_falses = 2; - */ - boolean hasNumFalses(); - /** - * optional int64 num_falses = 2; - */ - long getNumFalses(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats} - */ - public static final class BooleanStats extends - com.google.protobuf.GeneratedMessage - implements BooleanStatsOrBuilder { - // Use BooleanStats.newBuilder() to construct. - private BooleanStats(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private BooleanStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final BooleanStats defaultInstance; - public static BooleanStats getDefaultInstance() { - return defaultInstance; - } - - public BooleanStats getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private BooleanStats( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - numTrues_ = input.readInt64(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - numFalses_ = input.readInt64(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BooleanStats parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new BooleanStats(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional int64 num_trues = 
1; - public static final int NUM_TRUES_FIELD_NUMBER = 1; - private long numTrues_; - /** - * optional int64 num_trues = 1; - */ - public boolean hasNumTrues() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional int64 num_trues = 1; - */ - public long getNumTrues() { - return numTrues_; - } - - // optional int64 num_falses = 2; - public static final int NUM_FALSES_FIELD_NUMBER = 2; - private long numFalses_; - /** - * optional int64 num_falses = 2; - */ - public boolean hasNumFalses() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional int64 num_falses = 2; - */ - public long getNumFalses() { - return numFalses_; - } - - private void initFields() { - numTrues_ = 0L; - numFalses_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, numTrues_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeInt64(2, numFalses_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, numTrues_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(2, numFalses_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( - java.io.InputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - numTrues_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - numFalses_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder 
clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.numTrues_ = numTrues_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.numFalses_ = numFalses_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance()) return this; - if (other.hasNumTrues()) { - setNumTrues(other.getNumTrues()); - } - if (other.hasNumFalses()) { - setNumFalses(other.getNumFalses()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional int64 num_trues = 1; - private long numTrues_ ; - /** - * optional int64 num_trues = 1; - */ - public boolean hasNumTrues() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional int64 num_trues = 1; - */ - public long getNumTrues() { - return numTrues_; - } - /** - * optional int64 num_trues = 1; - */ - public Builder setNumTrues(long value) { - bitField0_ |= 0x00000001; - numTrues_ = 
value; - onChanged(); - return this; - } - /** - * optional int64 num_trues = 1; - */ - public Builder clearNumTrues() { - bitField0_ = (bitField0_ & ~0x00000001); - numTrues_ = 0L; - onChanged(); - return this; - } - - // optional int64 num_falses = 2; - private long numFalses_ ; - /** - * optional int64 num_falses = 2; - */ - public boolean hasNumFalses() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional int64 num_falses = 2; - */ - public long getNumFalses() { - return numFalses_; - } - /** - * optional int64 num_falses = 2; - */ - public Builder setNumFalses(long value) { - bitField0_ |= 0x00000002; - numFalses_ = value; - onChanged(); - return this; - } - /** - * optional int64 num_falses = 2; - */ - public Builder clearNumFalses() { - bitField0_ = (bitField0_ & ~0x00000002); - numFalses_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats) - } - - static { - defaultInstance = new BooleanStats(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats) - } - - public interface LongStatsOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional sint64 low_value = 1; - /** - * optional sint64 low_value = 1; - */ - boolean hasLowValue(); - /** - * optional sint64 low_value = 1; - */ - long getLowValue(); - - // optional sint64 high_value = 2; - /** - * optional sint64 high_value = 2; - */ - boolean hasHighValue(); - /** - * optional sint64 high_value = 2; - */ - long getHighValue(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats} - */ - public static final class LongStats extends - com.google.protobuf.GeneratedMessage - implements LongStatsOrBuilder { - // Use LongStats.newBuilder() to construct. 
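For context, the nested stats messages removed in this hunk follow the standard protobuf 2.x generated-builder pattern. Below is a minimal usage sketch of the deleted ColumnStats.BooleanStats type, assuming only accessors that appear in the removed code (newBuilder, setNumTrues, setNumFalses, build, parseFrom, getNumTrues, getNumFalses); the wrapper class and main method are hypothetical illustrations, not part of this patch.

import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats;

public class BooleanStatsSketch {
  public static void main(String[] args) throws Exception {
    // Build a BooleanStats message with the generated builder
    // (setters shown in the removed code above).
    BooleanStats stats = BooleanStats.newBuilder()
        .setNumTrues(42L)
        .setNumFalses(7L)
        .build();

    // Round-trip through the protobuf wire format using the generated parser.
    BooleanStats copy = BooleanStats.parseFrom(stats.toByteString());
    System.out.println(copy.getNumTrues() + " trues, " + copy.getNumFalses() + " falses");
  }
}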
- private LongStats(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private LongStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final LongStats defaultInstance; - public static LongStats getDefaultInstance() { - return defaultInstance; - } - - public LongStats getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private LongStats( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - lowValue_ = input.readSInt64(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - highValue_ = input.readSInt64(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public LongStats parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new LongStats(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional sint64 low_value = 1; - public static final int LOW_VALUE_FIELD_NUMBER = 1; - private long lowValue_; - /** - * optional sint64 low_value = 1; - */ - public boolean hasLowValue() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional sint64 low_value = 1; - */ - public long getLowValue() { - return lowValue_; - } - - // optional sint64 high_value = 2; - public static final int HIGH_VALUE_FIELD_NUMBER = 2; - private long highValue_; - /** - * optional sint64 
high_value = 2; - */ - public boolean hasHighValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional sint64 high_value = 2; - */ - public long getHighValue() { - return highValue_; - } - - private void initFields() { - lowValue_ = 0L; - highValue_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeSInt64(1, lowValue_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeSInt64(2, highValue_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeSInt64Size(1, lowValue_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeSInt64Size(2, highValue_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseDelimitedFrom( - 
java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - lowValue_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - highValue_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats getDefaultInstanceForType() { - return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.lowValue_ = lowValue_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.highValue_ = highValue_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance()) return this; - if (other.hasLowValue()) { - setLowValue(other.getLowValue()); - } - if (other.hasHighValue()) { - setHighValue(other.getHighValue()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional sint64 low_value = 1; - private long lowValue_ ; - /** - * optional sint64 low_value = 1; - */ - public boolean hasLowValue() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional sint64 low_value = 1; - */ - public long getLowValue() { - return lowValue_; - } - /** - * optional sint64 low_value = 1; - */ - public Builder setLowValue(long value) { - bitField0_ |= 0x00000001; - lowValue_ = value; - onChanged(); - return this; - } - /** - * optional sint64 low_value = 1; - */ - public Builder clearLowValue() { - bitField0_ = (bitField0_ & ~0x00000001); - lowValue_ = 0L; - onChanged(); - return this; - } - - // optional sint64 high_value = 2; - private long highValue_ ; - /** - * optional sint64 high_value = 2; - */ - public boolean hasHighValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional sint64 high_value = 2; - */ - 
public long getHighValue() { - return highValue_; - } - /** - * optional sint64 high_value = 2; - */ - public Builder setHighValue(long value) { - bitField0_ |= 0x00000002; - highValue_ = value; - onChanged(); - return this; - } - /** - * optional sint64 high_value = 2; - */ - public Builder clearHighValue() { - bitField0_ = (bitField0_ & ~0x00000002); - highValue_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats) - } - - static { - defaultInstance = new LongStats(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats) - } - - public interface DoubleStatsOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional double low_value = 1; - /** - * optional double low_value = 1; - */ - boolean hasLowValue(); - /** - * optional double low_value = 1; - */ - double getLowValue(); - - // optional double high_value = 2; - /** - * optional double high_value = 2; - */ - boolean hasHighValue(); - /** - * optional double high_value = 2; - */ - double getHighValue(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats} - */ - public static final class DoubleStats extends - com.google.protobuf.GeneratedMessage - implements DoubleStatsOrBuilder { - // Use DoubleStats.newBuilder() to construct. - private DoubleStats(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private DoubleStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final DoubleStats defaultInstance; - public static DoubleStats getDefaultInstance() { - return defaultInstance; - } - - public DoubleStats getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private DoubleStats( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 9: { - bitField0_ |= 0x00000001; - lowValue_ = input.readDouble(); - break; - } - case 17: { - bitField0_ |= 0x00000002; - highValue_ = input.readDouble(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor; - } - - protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public DoubleStats parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new DoubleStats(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional double low_value = 1; - public static final int LOW_VALUE_FIELD_NUMBER = 1; - private double lowValue_; - /** - * optional double low_value = 1; - */ - public boolean hasLowValue() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional double low_value = 1; - */ - public double getLowValue() { - return lowValue_; - } - - // optional double high_value = 2; - public static final int HIGH_VALUE_FIELD_NUMBER = 2; - private double highValue_; - /** - * optional double high_value = 2; - */ - public boolean hasHighValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional double high_value = 2; - */ - public double getHighValue() { - return highValue_; - } - - private void initFields() { - lowValue_ = 0D; - highValue_ = 0D; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeDouble(1, lowValue_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeDouble(2, highValue_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeDoubleSize(1, lowValue_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeDoubleSize(2, highValue_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - lowValue_ = 0D; - bitField0_ = (bitField0_ & ~0x00000001); - highValue_ = 0D; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.lowValue_ = lowValue_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.highValue_ = highValue_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance()) return this; - if (other.hasLowValue()) { - setLowValue(other.getLowValue()); - } - if (other.hasHighValue()) { - setHighValue(other.getHighValue()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder 
mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional double low_value = 1; - private double lowValue_ ; - /** - * optional double low_value = 1; - */ - public boolean hasLowValue() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional double low_value = 1; - */ - public double getLowValue() { - return lowValue_; - } - /** - * optional double low_value = 1; - */ - public Builder setLowValue(double value) { - bitField0_ |= 0x00000001; - lowValue_ = value; - onChanged(); - return this; - } - /** - * optional double low_value = 1; - */ - public Builder clearLowValue() { - bitField0_ = (bitField0_ & ~0x00000001); - lowValue_ = 0D; - onChanged(); - return this; - } - - // optional double high_value = 2; - private double highValue_ ; - /** - * optional double high_value = 2; - */ - public boolean hasHighValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional double high_value = 2; - */ - public double getHighValue() { - return highValue_; - } - /** - * optional double high_value = 2; - */ - public Builder setHighValue(double value) { - bitField0_ |= 0x00000002; - highValue_ = value; - onChanged(); - return this; - } - /** - * optional double high_value = 2; - */ - public Builder clearHighValue() { - bitField0_ = (bitField0_ & ~0x00000002); - highValue_ = 0D; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats) - } - - static { - defaultInstance = new DoubleStats(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats) - } - - public interface StringStatsOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional int64 max_col_length = 1; - /** - * optional int64 max_col_length = 1; - */ - boolean hasMaxColLength(); - /** - * optional int64 max_col_length = 1; - */ - long getMaxColLength(); - - // optional double avg_col_length = 2; - /** - * optional double avg_col_length = 2; - */ - boolean hasAvgColLength(); - /** - * optional double avg_col_length = 2; - */ - double getAvgColLength(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats} - */ - public static final class StringStats extends - com.google.protobuf.GeneratedMessage - implements StringStatsOrBuilder { - // Use StringStats.newBuilder() to construct. 
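The parent ColumnStats message carries at most one of these per-type sub-messages, and callers discriminate with the generated has*/get* pairs (hasDoubleStats/getDoubleStats, hasStringStats/getStringStats, and so on, as declared earlier in the removed interface). A reader-side sketch of that pattern follows; the describe helper is hypothetical and not part of this patch.

import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats;

public class ColumnStatsReaderSketch {
  // Hypothetical helper: illustrates the has*/get* discrimination pattern
  // of the removed ColumnStats message.
  static String describe(ColumnStats stats) {
    if (stats.hasDoubleStats()) {
      return stats.getColumnName() + ": double range ["
          + stats.getDoubleStats().getLowValue() + ", "
          + stats.getDoubleStats().getHighValue() + "]";
    }
    if (stats.hasStringStats()) {
      return stats.getColumnName() + ": max string length "
          + stats.getStringStats().getMaxColLength();
    }
    return stats.getColumnName() + ": no type-specific stats in this sketch";
  }
}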
- private StringStats(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private StringStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final StringStats defaultInstance; - public static StringStats getDefaultInstance() { - return defaultInstance; - } - - public StringStats getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private StringStats( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - maxColLength_ = input.readInt64(); - break; - } - case 17: { - bitField0_ |= 0x00000002; - avgColLength_ = input.readDouble(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public StringStats parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new StringStats(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional int64 max_col_length = 1; - public static final int MAX_COL_LENGTH_FIELD_NUMBER = 1; - private long maxColLength_; - /** - * optional int64 max_col_length = 1; - */ - public boolean hasMaxColLength() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional int64 max_col_length = 1; - */ - public long getMaxColLength() { - return maxColLength_; - } - - // optional double avg_col_length = 2; - public static final int 
AVG_COL_LENGTH_FIELD_NUMBER = 2; - private double avgColLength_; - /** - * optional double avg_col_length = 2; - */ - public boolean hasAvgColLength() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional double avg_col_length = 2; - */ - public double getAvgColLength() { - return avgColLength_; - } - - private void initFields() { - maxColLength_ = 0L; - avgColLength_ = 0D; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, maxColLength_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeDouble(2, avgColLength_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, maxColLength_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeDoubleSize(2, avgColLength_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return 
PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - maxColLength_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - avgColLength_ = 0D; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; - } - - public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.maxColLength_ = maxColLength_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.avgColLength_ = avgColLength_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance()) return this; - if (other.hasMaxColLength()) { - setMaxColLength(other.getMaxColLength()); - } - if (other.hasAvgColLength()) { - setAvgColLength(other.getAvgColLength()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional int64 max_col_length = 1; - private long maxColLength_ ; - /** - * optional int64 max_col_length = 1; - */ - public boolean hasMaxColLength() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional int64 max_col_length = 1; - */ - public long getMaxColLength() { - return maxColLength_; - } - /** - * optional int64 max_col_length = 1; - */ - public Builder setMaxColLength(long value) { - bitField0_ |= 0x00000001; - maxColLength_ = value; - onChanged(); - return this; - } - /** - * optional int64 max_col_length = 1; - */ - public Builder clearMaxColLength() { - bitField0_ = (bitField0_ & ~0x00000001); - maxColLength_ = 0L; - onChanged(); - return this; - } - - // optional double 
avg_col_length = 2; - private double avgColLength_ ; - /** - * optional double avg_col_length = 2; - */ - public boolean hasAvgColLength() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional double avg_col_length = 2; - */ - public double getAvgColLength() { - return avgColLength_; - } - /** - * optional double avg_col_length = 2; - */ - public Builder setAvgColLength(double value) { - bitField0_ |= 0x00000002; - avgColLength_ = value; - onChanged(); - return this; - } - /** - * optional double avg_col_length = 2; - */ - public Builder clearAvgColLength() { - bitField0_ = (bitField0_ & ~0x00000002); - avgColLength_ = 0D; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats) - } - - static { - defaultInstance = new StringStats(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats) - } - - public interface DecimalStatsOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - boolean hasLowValue(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getLowValue(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getLowValueOrBuilder(); - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - boolean hasHighValue(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getHighValue(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getHighValueOrBuilder(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats} - */ - public static final class DecimalStats extends - com.google.protobuf.GeneratedMessage - implements DecimalStatsOrBuilder { - // Use DecimalStats.newBuilder() to construct. 
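A minimal sketch of how the StringStats message deleted above was used (the same message also backs the binary_stats field of ColumnStats later in this file); the wrapper class name is hypothetical, and the assumption is again the protobuf 2.x runtime plus the generated HbaseMetastoreProto classes:

    // Illustrative only: uses the generated StringStats builder and merge API shown in the deleted code.
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats;

    public class StringStatsExample {
      public static void main(String[] args) {
        // optional int64 max_col_length = 1; optional double avg_col_length = 2;
        StringStats a = StringStats.newBuilder().setMaxColLength(64L).build();
        StringStats b = StringStats.newBuilder().setAvgColLength(17.3d).build();

        // Builder.mergeFrom(other) copies only the fields that other has set,
        // mirroring the hasMaxColLength()/hasAvgColLength() checks in the generated code.
        StringStats merged = StringStats.newBuilder().mergeFrom(a).mergeFrom(b).build();
        System.out.println(merged.getMaxColLength() + " " + merged.getAvgColLength());

        // Unset optional fields fall back to the defaults assigned in initFields() (0L / 0D).
        System.out.println(a.hasAvgColLength() + " " + a.getAvgColLength());
      }
    }
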
- private DecimalStats(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private DecimalStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final DecimalStats defaultInstance; - public static DecimalStats getDefaultInstance() { - return defaultInstance; - } - - public DecimalStats getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private DecimalStats( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = lowValue_.toBuilder(); - } - lowValue_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(lowValue_); - lowValue_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000001; - break; - } - case 18: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - subBuilder = highValue_.toBuilder(); - } - highValue_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(highValue_); - highValue_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000002; - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new 
com.google.protobuf.AbstractParser() { - public DecimalStats parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new DecimalStats(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface DecimalOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bytes unscaled = 1; - /** - * required bytes unscaled = 1; - */ - boolean hasUnscaled(); - /** - * required bytes unscaled = 1; - */ - com.google.protobuf.ByteString getUnscaled(); - - // required int32 scale = 2; - /** - * required int32 scale = 2; - */ - boolean hasScale(); - /** - * required int32 scale = 2; - */ - int getScale(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal} - */ - public static final class Decimal extends - com.google.protobuf.GeneratedMessage - implements DecimalOrBuilder { - // Use Decimal.newBuilder() to construct. - private Decimal(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Decimal(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Decimal defaultInstance; - public static Decimal getDefaultInstance() { - return defaultInstance; - } - - public Decimal getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Decimal( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - unscaled_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - scale_ = input.readInt32(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_fieldAccessorTable - .ensureFieldAccessorsInitialized( - 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Decimal parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Decimal(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required bytes unscaled = 1; - public static final int UNSCALED_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString unscaled_; - /** - * required bytes unscaled = 1; - */ - public boolean hasUnscaled() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bytes unscaled = 1; - */ - public com.google.protobuf.ByteString getUnscaled() { - return unscaled_; - } - - // required int32 scale = 2; - public static final int SCALE_FIELD_NUMBER = 2; - private int scale_; - /** - * required int32 scale = 2; - */ - public boolean hasScale() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required int32 scale = 2; - */ - public int getScale() { - return scale_; - } - - private void initFields() { - unscaled_ = com.google.protobuf.ByteString.EMPTY; - scale_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasUnscaled()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasScale()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, unscaled_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeInt32(2, scale_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, unscaled_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(2, scale_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, 
extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - unscaled_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - scale_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.unscaled_ = unscaled_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.scale_ = scale_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance()) return this; - if (other.hasUnscaled()) { - setUnscaled(other.getUnscaled()); - } - if (other.hasScale()) { - setScale(other.getScale()); - } - 
this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasUnscaled()) { - - return false; - } - if (!hasScale()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required bytes unscaled = 1; - private com.google.protobuf.ByteString unscaled_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes unscaled = 1; - */ - public boolean hasUnscaled() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bytes unscaled = 1; - */ - public com.google.protobuf.ByteString getUnscaled() { - return unscaled_; - } - /** - * required bytes unscaled = 1; - */ - public Builder setUnscaled(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - unscaled_ = value; - onChanged(); - return this; - } - /** - * required bytes unscaled = 1; - */ - public Builder clearUnscaled() { - bitField0_ = (bitField0_ & ~0x00000001); - unscaled_ = getDefaultInstance().getUnscaled(); - onChanged(); - return this; - } - - // required int32 scale = 2; - private int scale_ ; - /** - * required int32 scale = 2; - */ - public boolean hasScale() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required int32 scale = 2; - */ - public int getScale() { - return scale_; - } - /** - * required int32 scale = 2; - */ - public Builder setScale(int value) { - bitField0_ |= 0x00000002; - scale_ = value; - onChanged(); - return this; - } - /** - * required int32 scale = 2; - */ - public Builder clearScale() { - bitField0_ = (bitField0_ & ~0x00000002); - scale_ = 0; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal) - } - - static { - defaultInstance = new Decimal(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal) - } - - private int bitField0_; - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - public static final int LOW_VALUE_FIELD_NUMBER = 1; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal lowValue_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - public boolean hasLowValue() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getLowValue() { - return lowValue_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value 
= 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getLowValueOrBuilder() { - return lowValue_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - public static final int HIGH_VALUE_FIELD_NUMBER = 2; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal highValue_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - public boolean hasHighValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getHighValue() { - return highValue_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getHighValueOrBuilder() { - return highValue_; - } - - private void initFields() { - lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); - highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (hasLowValue()) { - if (!getLowValue().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasHighValue()) { - if (!getHighValue().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, lowValue_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, highValue_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, lowValue_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, highValue_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { 
- return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable - 
.ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getLowValueFieldBuilder(); - getHighValueFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (lowValueBuilder_ == null) { - lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); - } else { - lowValueBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - if (highValueBuilder_ == null) { - highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); - } else { - highValueBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (lowValueBuilder_ == null) { - result.lowValue_ = lowValue_; - } else { - result.lowValue_ = lowValueBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (highValueBuilder_ == null) { - result.highValue_ = highValue_; - } else { - result.highValue_ = highValueBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder 
mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance()) return this; - if (other.hasLowValue()) { - mergeLowValue(other.getLowValue()); - } - if (other.hasHighValue()) { - mergeHighValue(other.getHighValue()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (hasLowValue()) { - if (!getLowValue().isInitialized()) { - - return false; - } - } - if (hasHighValue()) { - if (!getHighValue().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder> lowValueBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - public boolean hasLowValue() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getLowValue() { - if (lowValueBuilder_ == null) { - return lowValue_; - } else { - return lowValueBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - public Builder setLowValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal value) { - if (lowValueBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - lowValue_ = value; - onChanged(); - } else { - lowValueBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - public Builder setLowValue( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder builderForValue) { - if (lowValueBuilder_ == null) { - lowValue_ = builderForValue.build(); - onChanged(); - } else { - lowValueBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - 
return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - public Builder mergeLowValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal value) { - if (lowValueBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - lowValue_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance()) { - lowValue_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder(lowValue_).mergeFrom(value).buildPartial(); - } else { - lowValue_ = value; - } - onChanged(); - } else { - lowValueBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - public Builder clearLowValue() { - if (lowValueBuilder_ == null) { - lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); - onChanged(); - } else { - lowValueBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder getLowValueBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getLowValueFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getLowValueOrBuilder() { - if (lowValueBuilder_ != null) { - return lowValueBuilder_.getMessageOrBuilder(); - } else { - return lowValue_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder> - getLowValueFieldBuilder() { - if (lowValueBuilder_ == null) { - lowValueBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder>( - lowValue_, - getParentForChildren(), - isClean()); - lowValue_ = null; - } - return lowValueBuilder_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder> highValueBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - public boolean hasHighValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getHighValue() { - if (highValueBuilder_ == null) { - return highValue_; - } else { - return highValueBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - public Builder setHighValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal value) { - if (highValueBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - highValue_ = value; - onChanged(); - } else { - highValueBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - public Builder setHighValue( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder builderForValue) { - if (highValueBuilder_ == null) { - highValue_ = builderForValue.build(); - onChanged(); - } else { - highValueBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - public Builder mergeHighValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal value) { - if (highValueBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - highValue_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance()) { - highValue_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder(highValue_).mergeFrom(value).buildPartial(); - } else { - highValue_ = value; - } - onChanged(); - } else { - highValueBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - public Builder clearHighValue() { - if (highValueBuilder_ == null) { - highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); - onChanged(); - } else { - highValueBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder getHighValueBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getHighValueFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getHighValueOrBuilder() { - if (highValueBuilder_ != null) { - return highValueBuilder_.getMessageOrBuilder(); - } else { - return highValue_; - } - 
} - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder> - getHighValueFieldBuilder() { - if (highValueBuilder_ == null) { - highValueBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder>( - highValue_, - getParentForChildren(), - isClean()); - highValue_ = null; - } - return highValueBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats) - } - - static { - defaultInstance = new DecimalStats(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats) - } - - private int bitField0_; - // optional int64 last_analyzed = 1; - public static final int LAST_ANALYZED_FIELD_NUMBER = 1; - private long lastAnalyzed_; - /** - * optional int64 last_analyzed = 1; - */ - public boolean hasLastAnalyzed() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional int64 last_analyzed = 1; - */ - public long getLastAnalyzed() { - return lastAnalyzed_; - } - - // required string column_type = 2; - public static final int COLUMN_TYPE_FIELD_NUMBER = 2; - private java.lang.Object columnType_; - /** - * required string column_type = 2; - */ - public boolean hasColumnType() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string column_type = 2; - */ - public java.lang.String getColumnType() { - java.lang.Object ref = columnType_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - columnType_ = s; - } - return s; - } - } - /** - * required string column_type = 2; - */ - public com.google.protobuf.ByteString - getColumnTypeBytes() { - java.lang.Object ref = columnType_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnType_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional int64 num_nulls = 3; - public static final int NUM_NULLS_FIELD_NUMBER = 3; - private long numNulls_; - /** - * optional int64 num_nulls = 3; - */ - public boolean hasNumNulls() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional int64 num_nulls = 3; - */ - public long getNumNulls() { - return numNulls_; - } - - // optional int64 num_distinct_values = 4; - public static final int NUM_DISTINCT_VALUES_FIELD_NUMBER = 4; - private long numDistinctValues_; - /** - * optional int64 num_distinct_values = 4; - */ - public boolean hasNumDistinctValues() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional int64 num_distinct_values = 4; - */ - public long getNumDistinctValues() 
{ - return numDistinctValues_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - public static final int BOOL_STATS_FIELD_NUMBER = 5; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats boolStats_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - public boolean hasBoolStats() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats getBoolStats() { - return boolStats_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder getBoolStatsOrBuilder() { - return boolStats_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - public static final int LONG_STATS_FIELD_NUMBER = 6; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats longStats_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - public boolean hasLongStats() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats getLongStats() { - return longStats_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder getLongStatsOrBuilder() { - return longStats_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - public static final int DOUBLE_STATS_FIELD_NUMBER = 7; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats doubleStats_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - public boolean hasDoubleStats() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats getDoubleStats() { - return doubleStats_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder getDoubleStatsOrBuilder() { - return doubleStats_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - public static final int STRING_STATS_FIELD_NUMBER = 8; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats stringStats_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - public boolean hasStringStats() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats 
getStringStats() { - return stringStats_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getStringStatsOrBuilder() { - return stringStats_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - public static final int BINARY_STATS_FIELD_NUMBER = 9; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats binaryStats_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - public boolean hasBinaryStats() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getBinaryStats() { - return binaryStats_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getBinaryStatsOrBuilder() { - return binaryStats_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - public static final int DECIMAL_STATS_FIELD_NUMBER = 10; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats decimalStats_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - public boolean hasDecimalStats() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats getDecimalStats() { - return decimalStats_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder getDecimalStatsOrBuilder() { - return decimalStats_; - } - - // optional string column_name = 11; - public static final int COLUMN_NAME_FIELD_NUMBER = 11; - private java.lang.Object columnName_; - /** - * optional string column_name = 11; - */ - public boolean hasColumnName() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - /** - * optional string column_name = 11; - */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - columnName_ = s; - } - return s; - } - } - /** - * optional string column_name = 11; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string bit_vectors = 12; - public static final int BIT_VECTORS_FIELD_NUMBER = 12; - private java.lang.Object bitVectors_; - /** - * optional string bit_vectors = 12; - */ - public boolean 
hasBitVectors() { - return ((bitField0_ & 0x00000800) == 0x00000800); - } - /** - * optional string bit_vectors = 12; - */ - public java.lang.String getBitVectors() { - java.lang.Object ref = bitVectors_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - bitVectors_ = s; - } - return s; - } - } - /** - * optional string bit_vectors = 12; - */ - public com.google.protobuf.ByteString - getBitVectorsBytes() { - java.lang.Object ref = bitVectors_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - bitVectors_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - lastAnalyzed_ = 0L; - columnType_ = ""; - numNulls_ = 0L; - numDistinctValues_ = 0L; - boolStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); - longStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); - doubleStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); - stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); - binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); - decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); - columnName_ = ""; - bitVectors_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasColumnType()) { - memoizedIsInitialized = 0; - return false; - } - if (hasDecimalStats()) { - if (!getDecimalStats().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, lastAnalyzed_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getColumnTypeBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt64(3, numNulls_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeInt64(4, numDistinctValues_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeMessage(5, boolStats_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeMessage(6, longStats_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeMessage(7, doubleStats_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeMessage(8, stringStats_); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeMessage(9, binaryStats_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - output.writeMessage(10, decimalStats_); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - output.writeBytes(11, getColumnNameBytes()); - } - if (((bitField0_ & 0x00000800) == 0x00000800)) { - output.writeBytes(12, getBitVectorsBytes()); - } - getUnknownFields().writeTo(output); - } - - private int 
memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, lastAnalyzed_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getColumnTypeBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(3, numNulls_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(4, numDistinctValues_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(5, boolStats_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(6, longStats_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(7, doubleStats_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(8, stringStats_); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(9, binaryStats_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(10, decimalStats_); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(11, getColumnNameBytes()); - } - if (((bitField0_ & 0x00000800) == 0x00000800)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(12, getBitVectorsBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( - java.io.InputStream input, 
- com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getBoolStatsFieldBuilder(); - getLongStatsFieldBuilder(); - getDoubleStatsFieldBuilder(); - getStringStatsFieldBuilder(); - getBinaryStatsFieldBuilder(); - getDecimalStatsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - lastAnalyzed_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - columnType_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); 
- numNulls_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - numDistinctValues_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - if (boolStatsBuilder_ == null) { - boolStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); - } else { - boolStatsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000010); - if (longStatsBuilder_ == null) { - longStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); - } else { - longStatsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000020); - if (doubleStatsBuilder_ == null) { - doubleStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); - } else { - doubleStatsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000040); - if (stringStatsBuilder_ == null) { - stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); - } else { - stringStatsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000080); - if (binaryStatsBuilder_ == null) { - binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); - } else { - binaryStatsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000100); - if (decimalStatsBuilder_ == null) { - decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); - } else { - decimalStatsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000200); - columnName_ = ""; - bitField0_ = (bitField0_ & ~0x00000400); - bitVectors_ = ""; - bitField0_ = (bitField0_ & ~0x00000800); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.lastAnalyzed_ = lastAnalyzed_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.columnType_ = columnType_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.numNulls_ = numNulls_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.numDistinctValues_ = numDistinctValues_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - if (boolStatsBuilder_ == null) { - 
result.boolStats_ = boolStats_; - } else { - result.boolStats_ = boolStatsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - if (longStatsBuilder_ == null) { - result.longStats_ = longStats_; - } else { - result.longStats_ = longStatsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; - } - if (doubleStatsBuilder_ == null) { - result.doubleStats_ = doubleStats_; - } else { - result.doubleStats_ = doubleStatsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000080; - } - if (stringStatsBuilder_ == null) { - result.stringStats_ = stringStats_; - } else { - result.stringStats_ = stringStatsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000100) == 0x00000100)) { - to_bitField0_ |= 0x00000100; - } - if (binaryStatsBuilder_ == null) { - result.binaryStats_ = binaryStats_; - } else { - result.binaryStats_ = binaryStatsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000200) == 0x00000200)) { - to_bitField0_ |= 0x00000200; - } - if (decimalStatsBuilder_ == null) { - result.decimalStats_ = decimalStats_; - } else { - result.decimalStats_ = decimalStatsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000400) == 0x00000400)) { - to_bitField0_ |= 0x00000400; - } - result.columnName_ = columnName_; - if (((from_bitField0_ & 0x00000800) == 0x00000800)) { - to_bitField0_ |= 0x00000800; - } - result.bitVectors_ = bitVectors_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance()) return this; - if (other.hasLastAnalyzed()) { - setLastAnalyzed(other.getLastAnalyzed()); - } - if (other.hasColumnType()) { - bitField0_ |= 0x00000002; - columnType_ = other.columnType_; - onChanged(); - } - if (other.hasNumNulls()) { - setNumNulls(other.getNumNulls()); - } - if (other.hasNumDistinctValues()) { - setNumDistinctValues(other.getNumDistinctValues()); - } - if (other.hasBoolStats()) { - mergeBoolStats(other.getBoolStats()); - } - if (other.hasLongStats()) { - mergeLongStats(other.getLongStats()); - } - if (other.hasDoubleStats()) { - mergeDoubleStats(other.getDoubleStats()); - } - if (other.hasStringStats()) { - mergeStringStats(other.getStringStats()); - } - if (other.hasBinaryStats()) { - mergeBinaryStats(other.getBinaryStats()); - } - if (other.hasDecimalStats()) { - mergeDecimalStats(other.getDecimalStats()); - } - if (other.hasColumnName()) { - bitField0_ |= 0x00000400; - columnName_ = other.columnName_; - onChanged(); - } - if (other.hasBitVectors()) { - bitField0_ |= 0x00000800; - bitVectors_ = other.bitVectors_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasColumnType()) { - - return false; - } - if (hasDecimalStats()) { - if (!getDecimalStats().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional int64 last_analyzed = 1; - private long lastAnalyzed_ ; - /** - * optional int64 last_analyzed = 1; - */ - public boolean hasLastAnalyzed() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional int64 last_analyzed = 1; - */ - public long getLastAnalyzed() { - return lastAnalyzed_; - } - /** - * optional int64 last_analyzed = 1; - */ - public Builder setLastAnalyzed(long value) { - bitField0_ |= 0x00000001; - lastAnalyzed_ = value; - onChanged(); - return this; - } - /** - * optional int64 last_analyzed = 1; - */ - public Builder clearLastAnalyzed() { - bitField0_ = (bitField0_ & ~0x00000001); - lastAnalyzed_ = 0L; - onChanged(); - return this; - } - - // required string column_type = 2; - private java.lang.Object columnType_ = ""; - /** - * required string column_type = 2; - */ - public boolean hasColumnType() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string column_type = 2; - */ - public java.lang.String getColumnType() { - java.lang.Object ref = columnType_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - columnType_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string column_type = 2; - */ - public com.google.protobuf.ByteString - getColumnTypeBytes() { - java.lang.Object ref = columnType_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnType_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string column_type = 2; - */ - public Builder setColumnType( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - columnType_ = value; - onChanged(); - return this; - } - /** - * required string column_type = 2; - */ - public Builder clearColumnType() { - bitField0_ = (bitField0_ & ~0x00000002); - columnType_ = getDefaultInstance().getColumnType(); - onChanged(); - return this; - } - /** - * required string column_type = 2; - */ - public Builder setColumnTypeBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - columnType_ = value; - onChanged(); - return this; - } - - // optional int64 num_nulls = 3; - private long numNulls_ ; - /** - * optional int64 num_nulls = 3; - */ - public boolean hasNumNulls() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional int64 num_nulls = 3; - */ - public long getNumNulls() { - return numNulls_; - } - /** - * optional int64 num_nulls = 3; - */ - public Builder setNumNulls(long value) { - bitField0_ |= 0x00000004; - numNulls_ = value; - onChanged(); - return this; - } - /** - * optional int64 num_nulls = 3; - */ - public Builder clearNumNulls() { - bitField0_ = (bitField0_ 
& ~0x00000004); - numNulls_ = 0L; - onChanged(); - return this; - } - - // optional int64 num_distinct_values = 4; - private long numDistinctValues_ ; - /** - * optional int64 num_distinct_values = 4; - */ - public boolean hasNumDistinctValues() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional int64 num_distinct_values = 4; - */ - public long getNumDistinctValues() { - return numDistinctValues_; - } - /** - * optional int64 num_distinct_values = 4; - */ - public Builder setNumDistinctValues(long value) { - bitField0_ |= 0x00000008; - numDistinctValues_ = value; - onChanged(); - return this; - } - /** - * optional int64 num_distinct_values = 4; - */ - public Builder clearNumDistinctValues() { - bitField0_ = (bitField0_ & ~0x00000008); - numDistinctValues_ = 0L; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats boolStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder> boolStatsBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - public boolean hasBoolStats() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats getBoolStats() { - if (boolStatsBuilder_ == null) { - return boolStats_; - } else { - return boolStatsBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - public Builder setBoolStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats value) { - if (boolStatsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - boolStats_ = value; - onChanged(); - } else { - boolStatsBuilder_.setMessage(value); - } - bitField0_ |= 0x00000010; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - public Builder setBoolStats( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder builderForValue) { - if (boolStatsBuilder_ == null) { - boolStats_ = builderForValue.build(); - onChanged(); - } else { - boolStatsBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000010; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - public Builder mergeBoolStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats value) { - if (boolStatsBuilder_ == null) { - if (((bitField0_ & 0x00000010) == 0x00000010) && - boolStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance()) { - boolStats_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder(boolStats_).mergeFrom(value).buildPartial(); - } else { - boolStats_ 
= value; - } - onChanged(); - } else { - boolStatsBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000010; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - public Builder clearBoolStats() { - if (boolStatsBuilder_ == null) { - boolStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); - onChanged(); - } else { - boolStatsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder getBoolStatsBuilder() { - bitField0_ |= 0x00000010; - onChanged(); - return getBoolStatsFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder getBoolStatsOrBuilder() { - if (boolStatsBuilder_ != null) { - return boolStatsBuilder_.getMessageOrBuilder(); - } else { - return boolStats_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder> - getBoolStatsFieldBuilder() { - if (boolStatsBuilder_ == null) { - boolStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder>( - boolStats_, - getParentForChildren(), - isClean()); - boolStats_ = null; - } - return boolStatsBuilder_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats longStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder> longStatsBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - public boolean hasLongStats() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats getLongStats() { - if (longStatsBuilder_ == null) { - return longStats_; - } else { - return longStatsBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - public Builder setLongStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats value) { - 
if (longStatsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - longStats_ = value; - onChanged(); - } else { - longStatsBuilder_.setMessage(value); - } - bitField0_ |= 0x00000020; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - public Builder setLongStats( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder builderForValue) { - if (longStatsBuilder_ == null) { - longStats_ = builderForValue.build(); - onChanged(); - } else { - longStatsBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000020; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - public Builder mergeLongStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats value) { - if (longStatsBuilder_ == null) { - if (((bitField0_ & 0x00000020) == 0x00000020) && - longStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance()) { - longStats_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.newBuilder(longStats_).mergeFrom(value).buildPartial(); - } else { - longStats_ = value; - } - onChanged(); - } else { - longStatsBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000020; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - public Builder clearLongStats() { - if (longStatsBuilder_ == null) { - longStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); - onChanged(); - } else { - longStatsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000020); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder getLongStatsBuilder() { - bitField0_ |= 0x00000020; - onChanged(); - return getLongStatsFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder getLongStatsOrBuilder() { - if (longStatsBuilder_ != null) { - return longStatsBuilder_.getMessageOrBuilder(); - } else { - return longStats_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder> - getLongStatsFieldBuilder() { - if (longStatsBuilder_ == null) { - longStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder>( - longStats_, - getParentForChildren(), - isClean()); - longStats_ = null; - } - return longStatsBuilder_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - private 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats doubleStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder> doubleStatsBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - public boolean hasDoubleStats() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats getDoubleStats() { - if (doubleStatsBuilder_ == null) { - return doubleStats_; - } else { - return doubleStatsBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - public Builder setDoubleStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats value) { - if (doubleStatsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - doubleStats_ = value; - onChanged(); - } else { - doubleStatsBuilder_.setMessage(value); - } - bitField0_ |= 0x00000040; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - public Builder setDoubleStats( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder builderForValue) { - if (doubleStatsBuilder_ == null) { - doubleStats_ = builderForValue.build(); - onChanged(); - } else { - doubleStatsBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000040; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - public Builder mergeDoubleStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats value) { - if (doubleStatsBuilder_ == null) { - if (((bitField0_ & 0x00000040) == 0x00000040) && - doubleStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance()) { - doubleStats_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder(doubleStats_).mergeFrom(value).buildPartial(); - } else { - doubleStats_ = value; - } - onChanged(); - } else { - doubleStatsBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000040; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - public Builder clearDoubleStats() { - if (doubleStatsBuilder_ == null) { - doubleStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); - onChanged(); - } else { - doubleStatsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000040); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder getDoubleStatsBuilder() { - bitField0_ |= 0x00000040; - onChanged(); - return getDoubleStatsFieldBuilder().getBuilder(); - } - /** - * 
optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder getDoubleStatsOrBuilder() { - if (doubleStatsBuilder_ != null) { - return doubleStatsBuilder_.getMessageOrBuilder(); - } else { - return doubleStats_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder> - getDoubleStatsFieldBuilder() { - if (doubleStatsBuilder_ == null) { - doubleStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder>( - doubleStats_, - getParentForChildren(), - isClean()); - doubleStats_ = null; - } - return doubleStatsBuilder_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder> stringStatsBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - public boolean hasStringStats() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getStringStats() { - if (stringStatsBuilder_ == null) { - return stringStats_; - } else { - return stringStatsBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - public Builder setStringStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats value) { - if (stringStatsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - stringStats_ = value; - onChanged(); - } else { - stringStatsBuilder_.setMessage(value); - } - bitField0_ |= 0x00000080; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - public Builder setStringStats( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder builderForValue) { - if (stringStatsBuilder_ == null) { - stringStats_ = builderForValue.build(); - onChanged(); - } else { - stringStatsBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000080; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - public 
Builder mergeStringStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats value) { - if (stringStatsBuilder_ == null) { - if (((bitField0_ & 0x00000080) == 0x00000080) && - stringStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance()) { - stringStats_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.newBuilder(stringStats_).mergeFrom(value).buildPartial(); - } else { - stringStats_ = value; - } - onChanged(); - } else { - stringStatsBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000080; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - public Builder clearStringStats() { - if (stringStatsBuilder_ == null) { - stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); - onChanged(); - } else { - stringStatsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000080); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder getStringStatsBuilder() { - bitField0_ |= 0x00000080; - onChanged(); - return getStringStatsFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getStringStatsOrBuilder() { - if (stringStatsBuilder_ != null) { - return stringStatsBuilder_.getMessageOrBuilder(); - } else { - return stringStats_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder> - getStringStatsFieldBuilder() { - if (stringStatsBuilder_ == null) { - stringStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder>( - stringStats_, - getParentForChildren(), - isClean()); - stringStats_ = null; - } - return stringStatsBuilder_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder> binaryStatsBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - public boolean hasBinaryStats() { - return ((bitField0_ & 0x00000100) 
== 0x00000100); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getBinaryStats() { - if (binaryStatsBuilder_ == null) { - return binaryStats_; - } else { - return binaryStatsBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - public Builder setBinaryStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats value) { - if (binaryStatsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - binaryStats_ = value; - onChanged(); - } else { - binaryStatsBuilder_.setMessage(value); - } - bitField0_ |= 0x00000100; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - public Builder setBinaryStats( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder builderForValue) { - if (binaryStatsBuilder_ == null) { - binaryStats_ = builderForValue.build(); - onChanged(); - } else { - binaryStatsBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000100; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - public Builder mergeBinaryStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats value) { - if (binaryStatsBuilder_ == null) { - if (((bitField0_ & 0x00000100) == 0x00000100) && - binaryStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance()) { - binaryStats_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.newBuilder(binaryStats_).mergeFrom(value).buildPartial(); - } else { - binaryStats_ = value; - } - onChanged(); - } else { - binaryStatsBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000100; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - public Builder clearBinaryStats() { - if (binaryStatsBuilder_ == null) { - binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); - onChanged(); - } else { - binaryStatsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000100); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder getBinaryStatsBuilder() { - bitField0_ |= 0x00000100; - onChanged(); - return getBinaryStatsFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getBinaryStatsOrBuilder() { - if (binaryStatsBuilder_ != null) { - return binaryStatsBuilder_.getMessageOrBuilder(); - } else { - return binaryStats_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder> - getBinaryStatsFieldBuilder() { - if (binaryStatsBuilder_ == null) { - binaryStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder>( - binaryStats_, - getParentForChildren(), - isClean()); - binaryStats_ = null; - } - return binaryStatsBuilder_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder> decimalStatsBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - public boolean hasDecimalStats() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats getDecimalStats() { - if (decimalStatsBuilder_ == null) { - return decimalStats_; - } else { - return decimalStatsBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - public Builder setDecimalStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats value) { - if (decimalStatsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - decimalStats_ = value; - onChanged(); - } else { - decimalStatsBuilder_.setMessage(value); - } - bitField0_ |= 0x00000200; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - public Builder setDecimalStats( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder builderForValue) { - if (decimalStatsBuilder_ == null) { - decimalStats_ = builderForValue.build(); - onChanged(); - } else { - decimalStatsBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000200; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - public Builder mergeDecimalStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats value) { - if (decimalStatsBuilder_ == null) { - if (((bitField0_ & 0x00000200) == 0x00000200) && - decimalStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance()) { - decimalStats_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder(decimalStats_).mergeFrom(value).buildPartial(); - } else { - decimalStats_ = value; - } - onChanged(); - } else { - decimalStatsBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000200; - return 
this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - public Builder clearDecimalStats() { - if (decimalStatsBuilder_ == null) { - decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); - onChanged(); - } else { - decimalStatsBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000200); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder getDecimalStatsBuilder() { - bitField0_ |= 0x00000200; - onChanged(); - return getDecimalStatsFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder getDecimalStatsOrBuilder() { - if (decimalStatsBuilder_ != null) { - return decimalStatsBuilder_.getMessageOrBuilder(); - } else { - return decimalStats_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder> - getDecimalStatsFieldBuilder() { - if (decimalStatsBuilder_ == null) { - decimalStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder>( - decimalStats_, - getParentForChildren(), - isClean()); - decimalStats_ = null; - } - return decimalStatsBuilder_; - } - - // optional string column_name = 11; - private java.lang.Object columnName_ = ""; - /** - * optional string column_name = 11; - */ - public boolean hasColumnName() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - /** - * optional string column_name = 11; - */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - columnName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string column_name = 11; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string column_name = 11; - */ - public Builder setColumnName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000400; - columnName_ = value; - onChanged(); - return this; - } - /** - * optional string column_name = 11; - */ - public Builder clearColumnName() { - bitField0_ = (bitField0_ & ~0x00000400); - columnName_ = getDefaultInstance().getColumnName(); - onChanged(); - return this; - } 
- /** - * optional string column_name = 11; - */ - public Builder setColumnNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000400; - columnName_ = value; - onChanged(); - return this; - } - - // optional string bit_vectors = 12; - private java.lang.Object bitVectors_ = ""; - /** - * optional string bit_vectors = 12; - */ - public boolean hasBitVectors() { - return ((bitField0_ & 0x00000800) == 0x00000800); - } - /** - * optional string bit_vectors = 12; - */ - public java.lang.String getBitVectors() { - java.lang.Object ref = bitVectors_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - bitVectors_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string bit_vectors = 12; - */ - public com.google.protobuf.ByteString - getBitVectorsBytes() { - java.lang.Object ref = bitVectors_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - bitVectors_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string bit_vectors = 12; - */ - public Builder setBitVectors( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000800; - bitVectors_ = value; - onChanged(); - return this; - } - /** - * optional string bit_vectors = 12; - */ - public Builder clearBitVectors() { - bitField0_ = (bitField0_ & ~0x00000800); - bitVectors_ = getDefaultInstance().getBitVectors(); - onChanged(); - return this; - } - /** - * optional string bit_vectors = 12; - */ - public Builder setBitVectorsBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000800; - bitVectors_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats) - } - - static { - defaultInstance = new ColumnStats(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats) - } - - public interface DatabaseOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional string description = 1; - /** - * optional string description = 1; - */ - boolean hasDescription(); - /** - * optional string description = 1; - */ - java.lang.String getDescription(); - /** - * optional string description = 1; - */ - com.google.protobuf.ByteString - getDescriptionBytes(); - - // optional string uri = 2; - /** - * optional string uri = 2; - */ - boolean hasUri(); - /** - * optional string uri = 2; - */ - java.lang.String getUri(); - /** - * optional string uri = 2; - */ - com.google.protobuf.ByteString - getUriBytes(); - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - boolean hasParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); - - // optional 
.org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - boolean hasPrivileges(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder(); - - // optional string owner_name = 5; - /** - * optional string owner_name = 5; - */ - boolean hasOwnerName(); - /** - * optional string owner_name = 5; - */ - java.lang.String getOwnerName(); - /** - * optional string owner_name = 5; - */ - com.google.protobuf.ByteString - getOwnerNameBytes(); - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; - */ - boolean hasOwnerType(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Database} - */ - public static final class Database extends - com.google.protobuf.GeneratedMessage - implements DatabaseOrBuilder { - // Use Database.newBuilder() to construct. - private Database(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Database(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Database defaultInstance; - public static Database getDefaultInstance() { - return defaultInstance; - } - - public Database getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Database( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - description_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - uri_ = input.readBytes(); - break; - } - case 26: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; - if (((bitField0_ & 0x00000004) == 0x00000004)) { - subBuilder = parameters_.toBuilder(); - } - parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(parameters_); - parameters_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000004; - break; - } - case 34: { - 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder subBuilder = null; - if (((bitField0_ & 0x00000008) == 0x00000008)) { - subBuilder = privileges_.toBuilder(); - } - privileges_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(privileges_); - privileges_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000008; - break; - } - case 42: { - bitField0_ |= 0x00000010; - ownerName_ = input.readBytes(); - break; - } - case 48: { - int rawValue = input.readEnum(); - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(6, rawValue); - } else { - bitField0_ |= 0x00000020; - ownerType_ = value; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Database parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Database(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional string description = 1; - public static final int DESCRIPTION_FIELD_NUMBER = 1; - private java.lang.Object description_; - /** - * optional string description = 1; - */ - public boolean hasDescription() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional string description = 1; - */ - public java.lang.String getDescription() { - java.lang.Object ref = description_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - description_ = s; - } - return s; - } - } - /** - * optional string description = 1; - */ - public com.google.protobuf.ByteString - getDescriptionBytes() { - java.lang.Object ref = description_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - description_ = b; - return b; - } else { - return 
(com.google.protobuf.ByteString) ref; - } - } - - // optional string uri = 2; - public static final int URI_FIELD_NUMBER = 2; - private java.lang.Object uri_; - /** - * optional string uri = 2; - */ - public boolean hasUri() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string uri = 2; - */ - public java.lang.String getUri() { - java.lang.Object ref = uri_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - uri_ = s; - } - return s; - } - } - /** - * optional string uri = 2; - */ - public com.google.protobuf.ByteString - getUriBytes() { - java.lang.Object ref = uri_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - uri_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - public static final int PARAMETERS_FIELD_NUMBER = 3; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public boolean hasParameters() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { - return parameters_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { - return parameters_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - public static final int PRIVILEGES_FIELD_NUMBER = 4; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - public boolean hasPrivileges() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { - return privileges_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { - return privileges_; - } - - // optional string owner_name = 5; - public static final int OWNER_NAME_FIELD_NUMBER = 5; - private java.lang.Object ownerName_; - /** - * optional string owner_name = 5; - */ - public boolean hasOwnerName() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional string owner_name = 5; - */ - public java.lang.String getOwnerName() { - java.lang.Object ref = ownerName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - ownerName_ = s; - } - return s; - } - } - /** - * optional string owner_name = 5; - */ - public 
com.google.protobuf.ByteString - getOwnerNameBytes() { - java.lang.Object ref = ownerName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - ownerName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; - public static final int OWNER_TYPE_FIELD_NUMBER = 6; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType ownerType_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; - */ - public boolean hasOwnerType() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType() { - return ownerType_; - } - - private void initFields() { - description_ = ""; - uri_ = ""; - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); - ownerName_ = ""; - ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (hasParameters()) { - if (!getParameters().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasPrivileges()) { - if (!getPrivileges().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getDescriptionBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getUriBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeMessage(3, parameters_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeMessage(4, privileges_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBytes(5, getOwnerNameBytes()); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeEnum(6, ownerType_.getNumber()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getDescriptionBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getUriBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, parameters_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, privileges_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(5, getOwnerNameBytes()); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - 
size += com.google.protobuf.CodedOutputStream - .computeEnumSize(6, ownerType_.getNumber()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Database} - */ - public static 
final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DatabaseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getParametersFieldBuilder(); - getPrivilegesFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - description_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - uri_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - if (parametersBuilder_ == null) { - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - } else { - parametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - if (privilegesBuilder_ == null) { - privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); - } else { - privilegesBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - ownerName_ = ""; - bitField0_ = (bitField0_ & ~0x00000010); - ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - bitField0_ = (bitField0_ & ~0x00000020); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.description_ = description_; - if 
(((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.uri_ = uri_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - if (parametersBuilder_ == null) { - result.parameters_ = parameters_; - } else { - result.parameters_ = parametersBuilder_.build(); - } - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - if (privilegesBuilder_ == null) { - result.privileges_ = privileges_; - } else { - result.privileges_ = privilegesBuilder_.build(); - } - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.ownerName_ = ownerName_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.ownerType_ = ownerType_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.getDefaultInstance()) return this; - if (other.hasDescription()) { - bitField0_ |= 0x00000001; - description_ = other.description_; - onChanged(); - } - if (other.hasUri()) { - bitField0_ |= 0x00000002; - uri_ = other.uri_; - onChanged(); - } - if (other.hasParameters()) { - mergeParameters(other.getParameters()); - } - if (other.hasPrivileges()) { - mergePrivileges(other.getPrivileges()); - } - if (other.hasOwnerName()) { - bitField0_ |= 0x00000010; - ownerName_ = other.ownerName_; - onChanged(); - } - if (other.hasOwnerType()) { - setOwnerType(other.getOwnerType()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (hasParameters()) { - if (!getParameters().isInitialized()) { - - return false; - } - } - if (hasPrivileges()) { - if (!getPrivileges().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional string description = 1; - private java.lang.Object description_ = ""; - /** - * optional string description = 1; - */ - public boolean hasDescription() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional string description = 1; - */ - public java.lang.String getDescription() { - java.lang.Object ref = description_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - description_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string description = 
1; - */ - public com.google.protobuf.ByteString - getDescriptionBytes() { - java.lang.Object ref = description_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - description_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string description = 1; - */ - public Builder setDescription( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - description_ = value; - onChanged(); - return this; - } - /** - * optional string description = 1; - */ - public Builder clearDescription() { - bitField0_ = (bitField0_ & ~0x00000001); - description_ = getDefaultInstance().getDescription(); - onChanged(); - return this; - } - /** - * optional string description = 1; - */ - public Builder setDescriptionBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - description_ = value; - onChanged(); - return this; - } - - // optional string uri = 2; - private java.lang.Object uri_ = ""; - /** - * optional string uri = 2; - */ - public boolean hasUri() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string uri = 2; - */ - public java.lang.String getUri() { - java.lang.Object ref = uri_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - uri_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string uri = 2; - */ - public com.google.protobuf.ByteString - getUriBytes() { - java.lang.Object ref = uri_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - uri_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string uri = 2; - */ - public Builder setUri( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - uri_ = value; - onChanged(); - return this; - } - /** - * optional string uri = 2; - */ - public Builder clearUri() { - bitField0_ = (bitField0_ & ~0x00000002); - uri_ = getDefaultInstance().getUri(); - onChanged(); - return this; - } - /** - * optional string uri = 2; - */ - public Builder setUriBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - uri_ = value; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public boolean hasParameters() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { - if (parametersBuilder_ == null) { - return parameters_; - } else { - return parametersBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (parametersBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - parameters_ = value; - onChanged(); - } else { - parametersBuilder_.setMessage(value); - } - bitField0_ |= 0x00000004; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public Builder setParameters( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { - if (parametersBuilder_ == null) { - parameters_ = builderForValue.build(); - onChanged(); - } else { - parametersBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000004; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (parametersBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004) && - parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { - parameters_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); - } else { - parameters_ = value; - } - onChanged(); - } else { - parametersBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000004; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public Builder clearParameters() { - if (parametersBuilder_ == null) { - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - onChanged(); - } else { - parametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { - bitField0_ |= 0x00000004; - onChanged(); - return getParametersFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { - if (parametersBuilder_ != null) { - return parametersBuilder_.getMessageOrBuilder(); - } else { - return parameters_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> - getParametersFieldBuilder() { - if (parametersBuilder_ == null) { - parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( - 
parameters_, - getParentForChildren(), - isClean()); - parameters_ = null; - } - return parametersBuilder_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> privilegesBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - public boolean hasPrivileges() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { - if (privilegesBuilder_ == null) { - return privileges_; - } else { - return privilegesBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - public Builder setPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { - if (privilegesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - privileges_ = value; - onChanged(); - } else { - privilegesBuilder_.setMessage(value); - } - bitField0_ |= 0x00000008; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - public Builder setPrivileges( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder builderForValue) { - if (privilegesBuilder_ == null) { - privileges_ = builderForValue.build(); - onChanged(); - } else { - privilegesBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000008; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - public Builder mergePrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { - if (privilegesBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008) && - privileges_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) { - privileges_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(privileges_).mergeFrom(value).buildPartial(); - } else { - privileges_ = value; - } - onChanged(); - } else { - privilegesBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000008; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - public Builder clearPrivileges() { - if (privilegesBuilder_ == null) { - privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); - onChanged(); - } else { - privilegesBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder 
getPrivilegesBuilder() { - bitField0_ |= 0x00000008; - onChanged(); - return getPrivilegesFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { - if (privilegesBuilder_ != null) { - return privilegesBuilder_.getMessageOrBuilder(); - } else { - return privileges_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> - getPrivilegesFieldBuilder() { - if (privilegesBuilder_ == null) { - privilegesBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder>( - privileges_, - getParentForChildren(), - isClean()); - privileges_ = null; - } - return privilegesBuilder_; - } - - // optional string owner_name = 5; - private java.lang.Object ownerName_ = ""; - /** - * optional string owner_name = 5; - */ - public boolean hasOwnerName() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional string owner_name = 5; - */ - public java.lang.String getOwnerName() { - java.lang.Object ref = ownerName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - ownerName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string owner_name = 5; - */ - public com.google.protobuf.ByteString - getOwnerNameBytes() { - java.lang.Object ref = ownerName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - ownerName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string owner_name = 5; - */ - public Builder setOwnerName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000010; - ownerName_ = value; - onChanged(); - return this; - } - /** - * optional string owner_name = 5; - */ - public Builder clearOwnerName() { - bitField0_ = (bitField0_ & ~0x00000010); - ownerName_ = getDefaultInstance().getOwnerName(); - onChanged(); - return this; - } - /** - * optional string owner_name = 5; - */ - public Builder setOwnerNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000010; - ownerName_ = value; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; - */ - public boolean hasOwnerType() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional 
.org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType() { - return ownerType_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; - */ - public Builder setOwnerType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000020; - ownerType_ = value; - onChanged(); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; - */ - public Builder clearOwnerType() { - bitField0_ = (bitField0_ & ~0x00000020); - ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Database) - } - - static { - defaultInstance = new Database(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Database) - } - - public interface DelegationTokenOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string token_str = 1; - /** - * required string token_str = 1; - */ - boolean hasTokenStr(); - /** - * required string token_str = 1; - */ - java.lang.String getTokenStr(); - /** - * required string token_str = 1; - */ - com.google.protobuf.ByteString - getTokenStrBytes(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.DelegationToken} - */ - public static final class DelegationToken extends - com.google.protobuf.GeneratedMessage - implements DelegationTokenOrBuilder { - // Use DelegationToken.newBuilder() to construct. 
- private DelegationToken(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private DelegationToken(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final DelegationToken defaultInstance; - public static DelegationToken getDefaultInstance() { - return defaultInstance; - } - - public DelegationToken getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private DelegationToken( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - tokenStr_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public DelegationToken parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new DelegationToken(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string token_str = 1; - public static final int TOKEN_STR_FIELD_NUMBER = 1; - private java.lang.Object tokenStr_; - /** - * required string token_str = 1; - */ - public boolean hasTokenStr() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string token_str = 1; - */ - public java.lang.String getTokenStr() { - java.lang.Object ref = tokenStr_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - 
java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - tokenStr_ = s; - } - return s; - } - } - /** - * required string token_str = 1; - */ - public com.google.protobuf.ByteString - getTokenStrBytes() { - java.lang.Object ref = tokenStr_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - tokenStr_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - tokenStr_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasTokenStr()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getTokenStrBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getTokenStrBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.DelegationToken} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationTokenOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - tokenStr_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.getDefaultInstance(); - } - - public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.tokenStr_ = tokenStr_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.getDefaultInstance()) return this; - if (other.hasTokenStr()) { - bitField0_ |= 0x00000001; - tokenStr_ = other.tokenStr_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasTokenStr()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string token_str = 1; - private java.lang.Object tokenStr_ = ""; - /** - * required string token_str = 1; - */ - public boolean hasTokenStr() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string token_str = 1; - */ - public java.lang.String getTokenStr() { - java.lang.Object ref = tokenStr_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - tokenStr_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string token_str = 1; - */ - public com.google.protobuf.ByteString - getTokenStrBytes() { - java.lang.Object ref = tokenStr_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - tokenStr_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string token_str = 1; - */ - public Builder setTokenStr( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tokenStr_ = value; - onChanged(); - return this; - } - /** - * required string 
token_str = 1; - */ - public Builder clearTokenStr() { - bitField0_ = (bitField0_ & ~0x00000001); - tokenStr_ = getDefaultInstance().getTokenStr(); - onChanged(); - return this; - } - /** - * required string token_str = 1; - */ - public Builder setTokenStrBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tokenStr_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.DelegationToken) - } - - static { - defaultInstance = new DelegationToken(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.DelegationToken) - } - - public interface FieldSchemaOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string name = 1; - /** - * required string name = 1; - */ - boolean hasName(); - /** - * required string name = 1; - */ - java.lang.String getName(); - /** - * required string name = 1; - */ - com.google.protobuf.ByteString - getNameBytes(); - - // required string type = 2; - /** - * required string type = 2; - */ - boolean hasType(); - /** - * required string type = 2; - */ - java.lang.String getType(); - /** - * required string type = 2; - */ - com.google.protobuf.ByteString - getTypeBytes(); - - // optional string comment = 3; - /** - * optional string comment = 3; - */ - boolean hasComment(); - /** - * optional string comment = 3; - */ - java.lang.String getComment(); - /** - * optional string comment = 3; - */ - com.google.protobuf.ByteString - getCommentBytes(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.FieldSchema} - */ - public static final class FieldSchema extends - com.google.protobuf.GeneratedMessage - implements FieldSchemaOrBuilder { - // Use FieldSchema.newBuilder() to construct. 
- private FieldSchema(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private FieldSchema(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final FieldSchema defaultInstance; - public static FieldSchema getDefaultInstance() { - return defaultInstance; - } - - public FieldSchema getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private FieldSchema( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - name_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - type_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - comment_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public FieldSchema parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new FieldSchema(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string name = 1; - public static final int NAME_FIELD_NUMBER = 1; - private java.lang.Object name_; - /** - * required string name = 1; - */ - public boolean hasName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string name = 1; - */ - public java.lang.String getName() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - 
com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - name_ = s; - } - return s; - } - } - /** - * required string name = 1; - */ - public com.google.protobuf.ByteString - getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string type = 2; - public static final int TYPE_FIELD_NUMBER = 2; - private java.lang.Object type_; - /** - * required string type = 2; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string type = 2; - */ - public java.lang.String getType() { - java.lang.Object ref = type_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - type_ = s; - } - return s; - } - } - /** - * required string type = 2; - */ - public com.google.protobuf.ByteString - getTypeBytes() { - java.lang.Object ref = type_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - type_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string comment = 3; - public static final int COMMENT_FIELD_NUMBER = 3; - private java.lang.Object comment_; - /** - * optional string comment = 3; - */ - public boolean hasComment() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string comment = 3; - */ - public java.lang.String getComment() { - java.lang.Object ref = comment_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - comment_ = s; - } - return s; - } - } - /** - * optional string comment = 3; - */ - public com.google.protobuf.ByteString - getCommentBytes() { - java.lang.Object ref = comment_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - comment_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - name_ = ""; - type_ = ""; - comment_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasType()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getTypeBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getCommentBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int 
getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getTypeBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getCommentBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.FieldSchema} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - name_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - type_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - comment_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.name_ = name_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.type_ = type_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - 
to_bitField0_ |= 0x00000004; - } - result.comment_ = comment_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()) return this; - if (other.hasName()) { - bitField0_ |= 0x00000001; - name_ = other.name_; - onChanged(); - } - if (other.hasType()) { - bitField0_ |= 0x00000002; - type_ = other.type_; - onChanged(); - } - if (other.hasComment()) { - bitField0_ |= 0x00000004; - comment_ = other.comment_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasName()) { - - return false; - } - if (!hasType()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string name = 1; - private java.lang.Object name_ = ""; - /** - * required string name = 1; - */ - public boolean hasName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string name = 1; - */ - public java.lang.String getName() { - java.lang.Object ref = name_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - name_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string name = 1; - */ - public com.google.protobuf.ByteString - getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string name = 1; - */ - public Builder setName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - name_ = value; - onChanged(); - return this; - } - /** - * required string name = 1; - */ - public Builder clearName() { - bitField0_ = (bitField0_ & ~0x00000001); - name_ = getDefaultInstance().getName(); - onChanged(); - return this; - } - /** - * required string name = 1; - */ - public Builder setNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - name_ = value; - onChanged(); - return this; - } - - // required string type = 2; - private java.lang.Object type_ = ""; - /** - * required string type = 2; - */ - public boolean 
hasType() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string type = 2; - */ - public java.lang.String getType() { - java.lang.Object ref = type_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - type_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string type = 2; - */ - public com.google.protobuf.ByteString - getTypeBytes() { - java.lang.Object ref = type_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - type_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string type = 2; - */ - public Builder setType( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - type_ = value; - onChanged(); - return this; - } - /** - * required string type = 2; - */ - public Builder clearType() { - bitField0_ = (bitField0_ & ~0x00000002); - type_ = getDefaultInstance().getType(); - onChanged(); - return this; - } - /** - * required string type = 2; - */ - public Builder setTypeBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - type_ = value; - onChanged(); - return this; - } - - // optional string comment = 3; - private java.lang.Object comment_ = ""; - /** - * optional string comment = 3; - */ - public boolean hasComment() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string comment = 3; - */ - public java.lang.String getComment() { - java.lang.Object ref = comment_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - comment_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string comment = 3; - */ - public com.google.protobuf.ByteString - getCommentBytes() { - java.lang.Object ref = comment_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - comment_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string comment = 3; - */ - public Builder setComment( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - comment_ = value; - onChanged(); - return this; - } - /** - * optional string comment = 3; - */ - public Builder clearComment() { - bitField0_ = (bitField0_ & ~0x00000004); - comment_ = getDefaultInstance().getComment(); - onChanged(); - return this; - } - /** - * optional string comment = 3; - */ - public Builder setCommentBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - comment_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.FieldSchema) - } - - static { - defaultInstance = new FieldSchema(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.FieldSchema) - } - - public interface FunctionOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional string class_name = 1; - /** - * optional string class_name = 1; - */ - boolean hasClassName(); - 
/** - * optional string class_name = 1; - */ - java.lang.String getClassName(); - /** - * optional string class_name = 1; - */ - com.google.protobuf.ByteString - getClassNameBytes(); - - // optional string owner_name = 2; - /** - * optional string owner_name = 2; - */ - boolean hasOwnerName(); - /** - * optional string owner_name = 2; - */ - java.lang.String getOwnerName(); - /** - * optional string owner_name = 2; - */ - com.google.protobuf.ByteString - getOwnerNameBytes(); - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; - */ - boolean hasOwnerType(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType(); - - // optional sint64 create_time = 4; - /** - * optional sint64 create_time = 4; - */ - boolean hasCreateTime(); - /** - * optional sint64 create_time = 4; - */ - long getCreateTime(); - - // optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; - */ - boolean hasFunctionType(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType getFunctionType(); - - // repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - java.util.List - getResourceUrisList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri getResourceUris(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - int getResourceUrisCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - java.util.List - getResourceUrisOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder getResourceUrisOrBuilder( - int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Function} - */ - public static final class Function extends - com.google.protobuf.GeneratedMessage - implements FunctionOrBuilder { - // Use Function.newBuilder() to construct. 
- private Function(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Function(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Function defaultInstance; - public static Function getDefaultInstance() { - return defaultInstance; - } - - public Function getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Function( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - className_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - ownerName_ = input.readBytes(); - break; - } - case 24: { - int rawValue = input.readEnum(); - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(3, rawValue); - } else { - bitField0_ |= 0x00000004; - ownerType_ = value; - } - break; - } - case 32: { - bitField0_ |= 0x00000008; - createTime_ = input.readSInt64(); - break; - } - case 40: { - int rawValue = input.readEnum(); - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(5, rawValue); - } else { - bitField0_ |= 0x00000010; - functionType_ = value; - } - break; - } - case 50: { - if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { - resourceUris_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000020; - } - resourceUris_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { - resourceUris_ = java.util.Collections.unmodifiableList(resourceUris_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Function parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Function(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - /** - * Protobuf enum {@code org.apache.hadoop.hive.metastore.hbase.Function.FunctionType} - */ - public enum FunctionType - implements com.google.protobuf.ProtocolMessageEnum { - /** - * JAVA = 1; - */ - JAVA(0, 1), - ; - - /** - * JAVA = 1; - */ - public static final int JAVA_VALUE = 1; - - - public final int getNumber() { return value; } - - public static FunctionType valueOf(int value) { - switch (value) { - case 1: return JAVA; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public FunctionType findValueByNumber(int number) { - return FunctionType.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.getDescriptor().getEnumTypes().get(0); - } - - private static final FunctionType[] VALUES = values(); - - public static FunctionType valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private FunctionType(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:org.apache.hadoop.hive.metastore.hbase.Function.FunctionType) - } - - public interface ResourceUriOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; - /** - * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; - */ - boolean hasResourceType(); - /** - * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType getResourceType(); - - // required string uri = 2; - /** - * required string uri = 2; - */ - boolean hasUri(); - /** - * required string uri = 2; - */ - java.lang.String getUri(); - /** - * required string uri = 2; - */ - com.google.protobuf.ByteString - getUriBytes(); - } - /** - * 
Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri} - */ - public static final class ResourceUri extends - com.google.protobuf.GeneratedMessage - implements ResourceUriOrBuilder { - // Use ResourceUri.newBuilder() to construct. - private ResourceUri(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ResourceUri(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ResourceUri defaultInstance; - public static ResourceUri getDefaultInstance() { - return defaultInstance; - } - - public ResourceUri getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ResourceUri( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - resourceType_ = value; - } - break; - } - case 18: { - bitField0_ |= 0x00000002; - uri_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ResourceUri parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ResourceUri(input, extensionRegistry); - } - }; - - @java.lang.Override - public 
com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - /** - * Protobuf enum {@code org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType} - */ - public enum ResourceType - implements com.google.protobuf.ProtocolMessageEnum { - /** - * JAR = 1; - */ - JAR(0, 1), - /** - * FILE = 2; - */ - FILE(1, 2), - /** - * ARCHIVE = 3; - */ - ARCHIVE(2, 3), - ; - - /** - * JAR = 1; - */ - public static final int JAR_VALUE = 1; - /** - * FILE = 2; - */ - public static final int FILE_VALUE = 2; - /** - * ARCHIVE = 3; - */ - public static final int ARCHIVE_VALUE = 3; - - - public final int getNumber() { return value; } - - public static ResourceType valueOf(int value) { - switch (value) { - case 1: return JAR; - case 2: return FILE; - case 3: return ARCHIVE; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public ResourceType findValueByNumber(int number) { - return ResourceType.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.getDescriptor().getEnumTypes().get(0); - } - - private static final ResourceType[] VALUES = values(); - - public static ResourceType valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private ResourceType(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType) - } - - private int bitField0_; - // required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; - public static final int RESOURCE_TYPE_FIELD_NUMBER = 1; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType resourceType_; - /** - * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; - */ - public boolean hasResourceType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType getResourceType() { - return resourceType_; - } - - // required string uri = 2; - public static final int URI_FIELD_NUMBER = 2; - private java.lang.Object uri_; - /** - * required string uri = 2; - */ - public boolean hasUri() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string uri = 2; - */ - public java.lang.String getUri() { - java.lang.Object ref = uri_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - 
(com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - uri_ = s; - } - return s; - } - } - /** - * required string uri = 2; - */ - public com.google.protobuf.ByteString - getUriBytes() { - java.lang.Object ref = uri_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - uri_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - resourceType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType.JAR; - uri_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasResourceType()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasUri()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, resourceType_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getUriBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, resourceType_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getUriBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - resourceType_ = 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType.JAR; - bitField0_ = (bitField0_ & ~0x00000001); - uri_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.resourceType_ = resourceType_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.uri_ = uri_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.getDefaultInstance()) return this; - if (other.hasResourceType()) { - setResourceType(other.getResourceType()); - } - if (other.hasUri()) { - bitField0_ |= 0x00000002; - uri_ = other.uri_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasResourceType()) { - - return false; - } - if (!hasUri()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; - private 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType resourceType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType.JAR; - /** - * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; - */ - public boolean hasResourceType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType getResourceType() { - return resourceType_; - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; - */ - public Builder setResourceType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - resourceType_ = value; - onChanged(); - return this; - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; - */ - public Builder clearResourceType() { - bitField0_ = (bitField0_ & ~0x00000001); - resourceType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType.JAR; - onChanged(); - return this; - } - - // required string uri = 2; - private java.lang.Object uri_ = ""; - /** - * required string uri = 2; - */ - public boolean hasUri() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string uri = 2; - */ - public java.lang.String getUri() { - java.lang.Object ref = uri_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - uri_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string uri = 2; - */ - public com.google.protobuf.ByteString - getUriBytes() { - java.lang.Object ref = uri_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - uri_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string uri = 2; - */ - public Builder setUri( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - uri_ = value; - onChanged(); - return this; - } - /** - * required string uri = 2; - */ - public Builder clearUri() { - bitField0_ = (bitField0_ & ~0x00000002); - uri_ = getDefaultInstance().getUri(); - onChanged(); - return this; - } - /** - * required string uri = 2; - */ - public Builder setUriBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - uri_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri) - } - - static { - defaultInstance = new ResourceUri(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri) - } - - private int bitField0_; - // optional string class_name = 1; - public static final int CLASS_NAME_FIELD_NUMBER = 1; - private java.lang.Object className_; - /** - * optional string class_name = 1; - */ - public boolean hasClassName() { - return ((bitField0_ & 
0x00000001) == 0x00000001); - } - /** - * optional string class_name = 1; - */ - public java.lang.String getClassName() { - java.lang.Object ref = className_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - className_ = s; - } - return s; - } - } - /** - * optional string class_name = 1; - */ - public com.google.protobuf.ByteString - getClassNameBytes() { - java.lang.Object ref = className_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - className_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string owner_name = 2; - public static final int OWNER_NAME_FIELD_NUMBER = 2; - private java.lang.Object ownerName_; - /** - * optional string owner_name = 2; - */ - public boolean hasOwnerName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string owner_name = 2; - */ - public java.lang.String getOwnerName() { - java.lang.Object ref = ownerName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - ownerName_ = s; - } - return s; - } - } - /** - * optional string owner_name = 2; - */ - public com.google.protobuf.ByteString - getOwnerNameBytes() { - java.lang.Object ref = ownerName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - ownerName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; - public static final int OWNER_TYPE_FIELD_NUMBER = 3; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType ownerType_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; - */ - public boolean hasOwnerType() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType() { - return ownerType_; - } - - // optional sint64 create_time = 4; - public static final int CREATE_TIME_FIELD_NUMBER = 4; - private long createTime_; - /** - * optional sint64 create_time = 4; - */ - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional sint64 create_time = 4; - */ - public long getCreateTime() { - return createTime_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; - public static final int FUNCTION_TYPE_FIELD_NUMBER = 5; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType functionType_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; - */ - public boolean hasFunctionType() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType 
getFunctionType() { - return functionType_; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - public static final int RESOURCE_URIS_FIELD_NUMBER = 6; - private java.util.List resourceUris_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public java.util.List getResourceUrisList() { - return resourceUris_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public java.util.List - getResourceUrisOrBuilderList() { - return resourceUris_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public int getResourceUrisCount() { - return resourceUris_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri getResourceUris(int index) { - return resourceUris_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder getResourceUrisOrBuilder( - int index) { - return resourceUris_.get(index); - } - - private void initFields() { - className_ = ""; - ownerName_ = ""; - ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - createTime_ = 0L; - functionType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType.JAVA; - resourceUris_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getResourceUrisCount(); i++) { - if (!getResourceUris(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getClassNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getOwnerNameBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeEnum(3, ownerType_.getNumber()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeSInt64(4, createTime_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeEnum(5, functionType_.getNumber()); - } - for (int i = 0; i < resourceUris_.size(); i++) { - output.writeMessage(6, resourceUris_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getClassNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getOwnerNameBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(3, ownerType_.getNumber()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - 
.computeSInt64Size(4, createTime_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(5, functionType_.getNumber()); - } - for (int i = 0; i < resourceUris_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(6, resourceUris_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder 
newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Function} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FunctionOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getResourceUrisFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - className_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - ownerName_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - bitField0_ = (bitField0_ & ~0x00000004); - createTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - functionType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType.JAVA; - bitField0_ = (bitField0_ & ~0x00000010); - if (resourceUrisBuilder_ == null) { - resourceUris_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000020); - } else { - resourceUrisBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ 
|= 0x00000001; - } - result.className_ = className_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.ownerName_ = ownerName_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.ownerType_ = ownerType_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.createTime_ = createTime_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.functionType_ = functionType_; - if (resourceUrisBuilder_ == null) { - if (((bitField0_ & 0x00000020) == 0x00000020)) { - resourceUris_ = java.util.Collections.unmodifiableList(resourceUris_); - bitField0_ = (bitField0_ & ~0x00000020); - } - result.resourceUris_ = resourceUris_; - } else { - result.resourceUris_ = resourceUrisBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.getDefaultInstance()) return this; - if (other.hasClassName()) { - bitField0_ |= 0x00000001; - className_ = other.className_; - onChanged(); - } - if (other.hasOwnerName()) { - bitField0_ |= 0x00000002; - ownerName_ = other.ownerName_; - onChanged(); - } - if (other.hasOwnerType()) { - setOwnerType(other.getOwnerType()); - } - if (other.hasCreateTime()) { - setCreateTime(other.getCreateTime()); - } - if (other.hasFunctionType()) { - setFunctionType(other.getFunctionType()); - } - if (resourceUrisBuilder_ == null) { - if (!other.resourceUris_.isEmpty()) { - if (resourceUris_.isEmpty()) { - resourceUris_ = other.resourceUris_; - bitField0_ = (bitField0_ & ~0x00000020); - } else { - ensureResourceUrisIsMutable(); - resourceUris_.addAll(other.resourceUris_); - } - onChanged(); - } - } else { - if (!other.resourceUris_.isEmpty()) { - if (resourceUrisBuilder_.isEmpty()) { - resourceUrisBuilder_.dispose(); - resourceUrisBuilder_ = null; - resourceUris_ = other.resourceUris_; - bitField0_ = (bitField0_ & ~0x00000020); - resourceUrisBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getResourceUrisFieldBuilder() : null; - } else { - resourceUrisBuilder_.addAllMessages(other.resourceUris_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getResourceUrisCount(); i++) { - if (!getResourceUris(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional string class_name = 1; - private java.lang.Object className_ = ""; - /** - * optional string class_name = 1; - */ - public boolean hasClassName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional string class_name = 1; - */ - public java.lang.String getClassName() { - java.lang.Object ref = className_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - className_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string class_name = 1; - */ - public com.google.protobuf.ByteString - getClassNameBytes() { - java.lang.Object ref = className_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - className_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string class_name = 1; - */ - public Builder setClassName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - className_ = value; - onChanged(); - return this; - } - /** - * optional string class_name = 1; - */ - public Builder clearClassName() { - bitField0_ = (bitField0_ & ~0x00000001); - className_ = getDefaultInstance().getClassName(); - onChanged(); - return this; - } - /** - * optional string class_name = 1; - */ - public Builder setClassNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - className_ = value; - onChanged(); - return this; - } - - // optional string owner_name = 2; - private java.lang.Object ownerName_ = ""; - /** - * optional string owner_name = 2; - */ - public boolean hasOwnerName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string owner_name = 2; - */ - public java.lang.String getOwnerName() { - java.lang.Object ref = ownerName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - ownerName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string owner_name = 2; - */ - public com.google.protobuf.ByteString - getOwnerNameBytes() { - java.lang.Object ref = ownerName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) 
ref); - ownerName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string owner_name = 2; - */ - public Builder setOwnerName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - ownerName_ = value; - onChanged(); - return this; - } - /** - * optional string owner_name = 2; - */ - public Builder clearOwnerName() { - bitField0_ = (bitField0_ & ~0x00000002); - ownerName_ = getDefaultInstance().getOwnerName(); - onChanged(); - return this; - } - /** - * optional string owner_name = 2; - */ - public Builder setOwnerNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - ownerName_ = value; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; - */ - public boolean hasOwnerType() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType() { - return ownerType_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; - */ - public Builder setOwnerType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - ownerType_ = value; - onChanged(); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; - */ - public Builder clearOwnerType() { - bitField0_ = (bitField0_ & ~0x00000004); - ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - onChanged(); - return this; - } - - // optional sint64 create_time = 4; - private long createTime_ ; - /** - * optional sint64 create_time = 4; - */ - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional sint64 create_time = 4; - */ - public long getCreateTime() { - return createTime_; - } - /** - * optional sint64 create_time = 4; - */ - public Builder setCreateTime(long value) { - bitField0_ |= 0x00000008; - createTime_ = value; - onChanged(); - return this; - } - /** - * optional sint64 create_time = 4; - */ - public Builder clearCreateTime() { - bitField0_ = (bitField0_ & ~0x00000008); - createTime_ = 0L; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType functionType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType.JAVA; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; - */ - public boolean hasFunctionType() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType getFunctionType() { - return 
functionType_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; - */ - public Builder setFunctionType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000010; - functionType_ = value; - onChanged(); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; - */ - public Builder clearFunctionType() { - bitField0_ = (bitField0_ & ~0x00000010); - functionType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType.JAVA; - onChanged(); - return this; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - private java.util.List resourceUris_ = - java.util.Collections.emptyList(); - private void ensureResourceUrisIsMutable() { - if (!((bitField0_ & 0x00000020) == 0x00000020)) { - resourceUris_ = new java.util.ArrayList(resourceUris_); - bitField0_ |= 0x00000020; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder> resourceUrisBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public java.util.List getResourceUrisList() { - if (resourceUrisBuilder_ == null) { - return java.util.Collections.unmodifiableList(resourceUris_); - } else { - return resourceUrisBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public int getResourceUrisCount() { - if (resourceUrisBuilder_ == null) { - return resourceUris_.size(); - } else { - return resourceUrisBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri getResourceUris(int index) { - if (resourceUrisBuilder_ == null) { - return resourceUris_.get(index); - } else { - return resourceUrisBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public Builder setResourceUris( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri value) { - if (resourceUrisBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureResourceUrisIsMutable(); - resourceUris_.set(index, value); - onChanged(); - } else { - resourceUrisBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public Builder setResourceUris( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder builderForValue) { - if (resourceUrisBuilder_ == null) { - ensureResourceUrisIsMutable(); - resourceUris_.set(index, builderForValue.build()); - onChanged(); - } else { - resourceUrisBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public Builder 
addResourceUris(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri value) { - if (resourceUrisBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureResourceUrisIsMutable(); - resourceUris_.add(value); - onChanged(); - } else { - resourceUrisBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public Builder addResourceUris( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri value) { - if (resourceUrisBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureResourceUrisIsMutable(); - resourceUris_.add(index, value); - onChanged(); - } else { - resourceUrisBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public Builder addResourceUris( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder builderForValue) { - if (resourceUrisBuilder_ == null) { - ensureResourceUrisIsMutable(); - resourceUris_.add(builderForValue.build()); - onChanged(); - } else { - resourceUrisBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public Builder addResourceUris( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder builderForValue) { - if (resourceUrisBuilder_ == null) { - ensureResourceUrisIsMutable(); - resourceUris_.add(index, builderForValue.build()); - onChanged(); - } else { - resourceUrisBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public Builder addAllResourceUris( - java.lang.Iterable values) { - if (resourceUrisBuilder_ == null) { - ensureResourceUrisIsMutable(); - super.addAll(values, resourceUris_); - onChanged(); - } else { - resourceUrisBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public Builder clearResourceUris() { - if (resourceUrisBuilder_ == null) { - resourceUris_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000020); - onChanged(); - } else { - resourceUrisBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public Builder removeResourceUris(int index) { - if (resourceUrisBuilder_ == null) { - ensureResourceUrisIsMutable(); - resourceUris_.remove(index); - onChanged(); - } else { - resourceUrisBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder getResourceUrisBuilder( - int index) { - return getResourceUrisFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder getResourceUrisOrBuilder( - int index) { - if (resourceUrisBuilder_ == null) { - return resourceUris_.get(index); } else { - return 
resourceUrisBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public java.util.List - getResourceUrisOrBuilderList() { - if (resourceUrisBuilder_ != null) { - return resourceUrisBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(resourceUris_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder addResourceUrisBuilder() { - return getResourceUrisFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder addResourceUrisBuilder( - int index) { - return getResourceUrisFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; - */ - public java.util.List - getResourceUrisBuilderList() { - return getResourceUrisFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder> - getResourceUrisFieldBuilder() { - if (resourceUrisBuilder_ == null) { - resourceUrisBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder>( - resourceUris_, - ((bitField0_ & 0x00000020) == 0x00000020), - getParentForChildren(), - isClean()); - resourceUris_ = null; - } - return resourceUrisBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Function) - } - - static { - defaultInstance = new Function(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Function) - } - - public interface MasterKeyOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string master_key = 1; - /** - * required string master_key = 1; - */ - boolean hasMasterKey(); - /** - * required string master_key = 1; - */ - java.lang.String getMasterKey(); - /** - * required string master_key = 1; - */ - com.google.protobuf.ByteString - getMasterKeyBytes(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.MasterKey} - */ - public static final class MasterKey extends - com.google.protobuf.GeneratedMessage - implements MasterKeyOrBuilder { - // Use MasterKey.newBuilder() to construct. 
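A minimal usage sketch of the generated builder API being removed above, assuming only the methods visible in this diff (newBuilder, setClassName, setOwnerName, setOwnerType, setCreateTime, setFunctionType, build, parseFrom) plus the standard protobuf toByteArray(); the class name, owner, and timestamp values are illustrative, not taken from the original code:

import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

public class FunctionRoundTripSketch {
  public static void main(String[] args) throws Exception {
    // Build a Function record using the optional fields declared in the message
    // (class_name = 1, owner_name = 2, owner_type = 3, create_time = 4, function_type = 5).
    HbaseMetastoreProto.Function fn = HbaseMetastoreProto.Function.newBuilder()
        .setClassName("org.example.udf.MyUpper")   // hypothetical UDF implementation class
        .setOwnerName("hive")                       // hypothetical owner
        .setOwnerType(HbaseMetastoreProto.PrincipalType.USER)
        .setCreateTime(System.currentTimeMillis() / 1000L)
        .setFunctionType(HbaseMetastoreProto.Function.FunctionType.JAVA)
        .build();

    // Round-trip through the byte[] form that the HBase-backed metastore presumably
    // persisted as a cell value, then verify a field survives the round trip.
    byte[] serialized = fn.toByteArray();
    HbaseMetastoreProto.Function copy = HbaseMetastoreProto.Function.parseFrom(serialized);
    if (!copy.getClassName().equals(fn.getClassName())) {
      throw new IllegalStateException("round trip lost class_name");
    }
  }
}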
- private MasterKey(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private MasterKey(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final MasterKey defaultInstance; - public static MasterKey getDefaultInstance() { - return defaultInstance; - } - - public MasterKey getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private MasterKey( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - masterKey_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public MasterKey parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new MasterKey(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string master_key = 1; - public static final int MASTER_KEY_FIELD_NUMBER = 1; - private java.lang.Object masterKey_; - /** - * required string master_key = 1; - */ - public boolean hasMasterKey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string master_key = 1; - */ - public java.lang.String getMasterKey() { - java.lang.Object ref = masterKey_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - masterKey_ 
= s; - } - return s; - } - } - /** - * required string master_key = 1; - */ - public com.google.protobuf.ByteString - getMasterKeyBytes() { - java.lang.Object ref = masterKey_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - masterKey_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - masterKey_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasMasterKey()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getMasterKeyBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getMasterKeyBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseDelimitedFrom( - java.io.InputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.MasterKey} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKeyOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - masterKey_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey result = buildPartial(); - if (!result.isInitialized()) { - throw 
newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.masterKey_ = masterKey_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.getDefaultInstance()) return this; - if (other.hasMasterKey()) { - bitField0_ |= 0x00000001; - masterKey_ = other.masterKey_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasMasterKey()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string master_key = 1; - private java.lang.Object masterKey_ = ""; - /** - * required string master_key = 1; - */ - public boolean hasMasterKey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string master_key = 1; - */ - public java.lang.String getMasterKey() { - java.lang.Object ref = masterKey_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - masterKey_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string master_key = 1; - */ - public com.google.protobuf.ByteString - getMasterKeyBytes() { - java.lang.Object ref = masterKey_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - masterKey_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string master_key = 1; - */ - public Builder setMasterKey( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - masterKey_ = value; - onChanged(); - return this; - } - /** - * required string master_key = 1; - */ - public Builder clearMasterKey() { - bitField0_ = (bitField0_ & ~0x00000001); - masterKey_ = getDefaultInstance().getMasterKey(); - onChanged(); - return this; - } - /** - * required string master_key = 1; - */ - public Builder 
setMasterKeyBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - masterKey_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.MasterKey) - } - - static { - defaultInstance = new MasterKey(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.MasterKey) - } - - public interface ParameterEntryOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string key = 1; - /** - * required string key = 1; - */ - boolean hasKey(); - /** - * required string key = 1; - */ - java.lang.String getKey(); - /** - * required string key = 1; - */ - com.google.protobuf.ByteString - getKeyBytes(); - - // required string value = 2; - /** - * required string value = 2; - */ - boolean hasValue(); - /** - * required string value = 2; - */ - java.lang.String getValue(); - /** - * required string value = 2; - */ - com.google.protobuf.ByteString - getValueBytes(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ParameterEntry} - */ - public static final class ParameterEntry extends - com.google.protobuf.GeneratedMessage - implements ParameterEntryOrBuilder { - // Use ParameterEntry.newBuilder() to construct. - private ParameterEntry(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ParameterEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ParameterEntry defaultInstance; - public static ParameterEntry getDefaultInstance() { - return defaultInstance; - } - - public ParameterEntry getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ParameterEntry( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - key_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - value_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - 
internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ParameterEntry parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ParameterEntry(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string key = 1; - public static final int KEY_FIELD_NUMBER = 1; - private java.lang.Object key_; - /** - * required string key = 1; - */ - public boolean hasKey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string key = 1; - */ - public java.lang.String getKey() { - java.lang.Object ref = key_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - key_ = s; - } - return s; - } - } - /** - * required string key = 1; - */ - public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - key_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string value = 2; - public static final int VALUE_FIELD_NUMBER = 2; - private java.lang.Object value_; - /** - * required string value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string value = 2; - */ - public java.lang.String getValue() { - java.lang.Object ref = value_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - value_ = s; - } - return s; - } - } - /** - * required string value = 2; - */ - public com.google.protobuf.ByteString - getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - value_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - key_ = ""; - value_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasKey()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasValue()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getKeyBytes()); - } 
- if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getValueBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getKeyBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getValueBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder 
newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ParameterEntry} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - key_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - value_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.key_ = key_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.value_ = value_; - 
result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.getDefaultInstance()) return this; - if (other.hasKey()) { - bitField0_ |= 0x00000001; - key_ = other.key_; - onChanged(); - } - if (other.hasValue()) { - bitField0_ |= 0x00000002; - value_ = other.value_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasKey()) { - - return false; - } - if (!hasValue()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string key = 1; - private java.lang.Object key_ = ""; - /** - * required string key = 1; - */ - public boolean hasKey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string key = 1; - */ - public java.lang.String getKey() { - java.lang.Object ref = key_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - key_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string key = 1; - */ - public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - key_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string key = 1; - */ - public Builder setKey( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - key_ = value; - onChanged(); - return this; - } - /** - * required string key = 1; - */ - public Builder clearKey() { - bitField0_ = (bitField0_ & ~0x00000001); - key_ = getDefaultInstance().getKey(); - onChanged(); - return this; - } - /** - * required string key = 1; - */ - public Builder setKeyBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - key_ = value; - onChanged(); - return this; - } - - // required string value = 2; - private java.lang.Object value_ = ""; - /** - * required string value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string value = 2; - */ - public java.lang.String getValue() { - java.lang.Object ref 
= value_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - value_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string value = 2; - */ - public com.google.protobuf.ByteString - getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - value_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string value = 2; - */ - public Builder setValue( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; - onChanged(); - return this; - } - /** - * required string value = 2; - */ - public Builder clearValue() { - bitField0_ = (bitField0_ & ~0x00000002); - value_ = getDefaultInstance().getValue(); - onChanged(); - return this; - } - /** - * required string value = 2; - */ - public Builder setValueBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ParameterEntry) - } - - static { - defaultInstance = new ParameterEntry(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ParameterEntry) - } - - public interface ParametersOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - java.util.List - getParameterList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry getParameter(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - int getParameterCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - java.util.List - getParameterOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder getParameterOrBuilder( - int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Parameters} - */ - public static final class Parameters extends - com.google.protobuf.GeneratedMessage - implements ParametersOrBuilder { - // Use Parameters.newBuilder() to construct. 
- private Parameters(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Parameters(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Parameters defaultInstance; - public static Parameters getDefaultInstance() { - return defaultInstance; - } - - public Parameters getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Parameters( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - parameter_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - parameter_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - parameter_ = java.util.Collections.unmodifiableList(parameter_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Parameters parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Parameters(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - public static final int PARAMETER_FIELD_NUMBER = 1; - private java.util.List parameter_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public java.util.List 
getParameterList() { - return parameter_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public java.util.List - getParameterOrBuilderList() { - return parameter_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public int getParameterCount() { - return parameter_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry getParameter(int index) { - return parameter_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder getParameterOrBuilder( - int index) { - return parameter_.get(index); - } - - private void initFields() { - parameter_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getParameterCount(); i++) { - if (!getParameter(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < parameter_.size(); i++) { - output.writeMessage(1, parameter_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < parameter_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, parameter_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters 
parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Parameters} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getParameterFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (parameterBuilder_ == null) { - parameter_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - parameterBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return 
create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters(this); - int from_bitField0_ = bitField0_; - if (parameterBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - parameter_ = java.util.Collections.unmodifiableList(parameter_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.parameter_ = parameter_; - } else { - result.parameter_ = parameterBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) return this; - if (parameterBuilder_ == null) { - if (!other.parameter_.isEmpty()) { - if (parameter_.isEmpty()) { - parameter_ = other.parameter_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureParameterIsMutable(); - parameter_.addAll(other.parameter_); - } - onChanged(); - } - } else { - if (!other.parameter_.isEmpty()) { - if (parameterBuilder_.isEmpty()) { - parameterBuilder_.dispose(); - parameterBuilder_ = null; - parameter_ = other.parameter_; - bitField0_ = (bitField0_ & ~0x00000001); - parameterBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getParameterFieldBuilder() : null; - } else { - parameterBuilder_.addAllMessages(other.parameter_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getParameterCount(); i++) { - if (!getParameter(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - private java.util.List parameter_ = - java.util.Collections.emptyList(); - private void ensureParameterIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - parameter_ = new java.util.ArrayList(parameter_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder> parameterBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public java.util.List getParameterList() { - if (parameterBuilder_ == null) { - return java.util.Collections.unmodifiableList(parameter_); - } else { - return parameterBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public int getParameterCount() { - if (parameterBuilder_ == null) { - return parameter_.size(); - } else { - return parameterBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry getParameter(int index) { - if (parameterBuilder_ == null) { - return parameter_.get(index); - } else { - return parameterBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public Builder setParameter( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry value) { - if (parameterBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureParameterIsMutable(); - parameter_.set(index, value); - onChanged(); - } else { - parameterBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public Builder setParameter( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder builderForValue) { - if (parameterBuilder_ == null) { - ensureParameterIsMutable(); - parameter_.set(index, builderForValue.build()); - onChanged(); - } else { - parameterBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public Builder addParameter(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry value) { - if (parameterBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureParameterIsMutable(); - parameter_.add(value); - onChanged(); - } else { - parameterBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public Builder addParameter( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry value) { - if (parameterBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureParameterIsMutable(); - parameter_.add(index, value); - onChanged(); - } else { - parameterBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public Builder addParameter( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder builderForValue) { - if (parameterBuilder_ == null) { - ensureParameterIsMutable(); - parameter_.add(builderForValue.build()); - onChanged(); - } else { - parameterBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public Builder addParameter( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder builderForValue) { - if (parameterBuilder_ == null) { - ensureParameterIsMutable(); - parameter_.add(index, builderForValue.build()); - onChanged(); - } else { - parameterBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public Builder addAllParameter( - java.lang.Iterable values) { - if (parameterBuilder_ == null) { - ensureParameterIsMutable(); - super.addAll(values, parameter_); - onChanged(); - } else { - parameterBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public Builder clearParameter() { - if (parameterBuilder_ == null) { - parameter_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - parameterBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public Builder removeParameter(int index) { - if (parameterBuilder_ == null) { - ensureParameterIsMutable(); - parameter_.remove(index); - onChanged(); - } else { - parameterBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder getParameterBuilder( - int index) { - return getParameterFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder getParameterOrBuilder( - int index) { - if (parameterBuilder_ == null) { - return parameter_.get(index); } else { - return parameterBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - 
*/ - public java.util.List - getParameterOrBuilderList() { - if (parameterBuilder_ != null) { - return parameterBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(parameter_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder addParameterBuilder() { - return getParameterFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder addParameterBuilder( - int index) { - return getParameterFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; - */ - public java.util.List - getParameterBuilderList() { - return getParameterFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder> - getParameterFieldBuilder() { - if (parameterBuilder_ == null) { - parameterBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder>( - parameter_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - parameter_ = null; - } - return parameterBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Parameters) - } - - static { - defaultInstance = new Parameters(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Parameters) - } - - public interface PartitionOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional int64 create_time = 1; - /** - * optional int64 create_time = 1; - */ - boolean hasCreateTime(); - /** - * optional int64 create_time = 1; - */ - long getCreateTime(); - - // optional int64 last_access_time = 2; - /** - * optional int64 last_access_time = 2; - */ - boolean hasLastAccessTime(); - /** - * optional int64 last_access_time = 2; - */ - long getLastAccessTime(); - - // optional string location = 3; - /** - * optional string location = 3; - */ - boolean hasLocation(); - /** - * optional string location = 3; - */ - java.lang.String getLocation(); - /** - * optional string location = 3; - */ - com.google.protobuf.ByteString - getLocationBytes(); - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-     * storage descriptor parameters
-     *
- */ - boolean hasSdParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-     * storage descriptor parameters
-     *
- */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-     * storage descriptor parameters
-     *
- */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder(); - - // required bytes sd_hash = 5; - /** - * required bytes sd_hash = 5; - */ - boolean hasSdHash(); - /** - * required bytes sd_hash = 5; - */ - com.google.protobuf.ByteString getSdHash(); - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-     * partition parameters
-     *
- */ - boolean hasParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-     * partition parameters
-     *
- */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-     * partition parameters
-     *
- */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Partition} - */ - public static final class Partition extends - com.google.protobuf.GeneratedMessage - implements PartitionOrBuilder { - // Use Partition.newBuilder() to construct. - private Partition(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Partition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Partition defaultInstance; - public static Partition getDefaultInstance() { - return defaultInstance; - } - - public Partition getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Partition( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - createTime_ = input.readInt64(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - lastAccessTime_ = input.readInt64(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - location_ = input.readBytes(); - break; - } - case 34: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; - if (((bitField0_ & 0x00000008) == 0x00000008)) { - subBuilder = sdParameters_.toBuilder(); - } - sdParameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(sdParameters_); - sdParameters_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000008; - break; - } - case 42: { - bitField0_ |= 0x00000010; - sdHash_ = input.readBytes(); - break; - } - case 50: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; - if (((bitField0_ & 0x00000020) == 0x00000020)) { - subBuilder = parameters_.toBuilder(); - } - parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(parameters_); - parameters_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000020; - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Partition parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Partition(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional int64 create_time = 1; - public static final int CREATE_TIME_FIELD_NUMBER = 1; - private long createTime_; - /** - * optional int64 create_time = 1; - */ - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional int64 create_time = 1; - */ - public long getCreateTime() { - return createTime_; - } - - // optional int64 last_access_time = 2; - public static final int LAST_ACCESS_TIME_FIELD_NUMBER = 2; - private long lastAccessTime_; - /** - * optional int64 last_access_time = 2; - */ - public boolean hasLastAccessTime() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional int64 last_access_time = 2; - */ - public long getLastAccessTime() { - return lastAccessTime_; - } - - // optional string location = 3; - public static final int LOCATION_FIELD_NUMBER = 3; - private java.lang.Object location_; - /** - * optional string location = 3; - */ - public boolean hasLocation() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string location = 3; - */ - public java.lang.String getLocation() { - java.lang.Object ref = location_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - location_ = s; - } - return s; - } - } - /** - * optional string location = 3; - */ - public com.google.protobuf.ByteString - getLocationBytes() { - java.lang.Object ref = location_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - location_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - public static final int SD_PARAMETERS_FIELD_NUMBER = 4; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-     * storage descriptor parameters
-     *
- */ - public boolean hasSdParameters() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-     * storage descriptor parameters
-     *
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { - return sdParameters_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-     * storage descriptor parameters
-     *
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { - return sdParameters_; - } - - // required bytes sd_hash = 5; - public static final int SD_HASH_FIELD_NUMBER = 5; - private com.google.protobuf.ByteString sdHash_; - /** - * required bytes sd_hash = 5; - */ - public boolean hasSdHash() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * required bytes sd_hash = 5; - */ - public com.google.protobuf.ByteString getSdHash() { - return sdHash_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - public static final int PARAMETERS_FIELD_NUMBER = 6; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-     * partition parameters
-     *
- */ - public boolean hasParameters() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-     * partition parameters
-     *
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { - return parameters_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-     * partition parameters
-     *
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { - return parameters_; - } - - private void initFields() { - createTime_ = 0L; - lastAccessTime_ = 0L; - location_ = ""; - sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - sdHash_ = com.google.protobuf.ByteString.EMPTY; - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSdHash()) { - memoizedIsInitialized = 0; - return false; - } - if (hasSdParameters()) { - if (!getSdParameters().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasParameters()) { - if (!getParameters().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, createTime_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeInt64(2, lastAccessTime_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getLocationBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeMessage(4, sdParameters_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBytes(5, sdHash_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeMessage(6, parameters_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, createTime_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(2, lastAccessTime_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getLocationBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, sdParameters_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(5, sdHash_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(6, parameters_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Partition} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getSdParametersFieldBuilder(); - getParametersFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - createTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - lastAccessTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - location_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - if (sdParametersBuilder_ == null) { - sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - } else { - sdParametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - sdHash_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000010); - if (parametersBuilder_ == null) { - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - } else { - parametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000020); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.createTime_ = createTime_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.lastAccessTime_ = lastAccessTime_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.location_ = location_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - if (sdParametersBuilder_ == null) { - result.sdParameters_ = sdParameters_; - } else { - result.sdParameters_ = sdParametersBuilder_.build(); - } - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.sdHash_ = sdHash_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - if (parametersBuilder_ == null) { - result.parameters_ = parameters_; - } else { - result.parameters_ = 
parametersBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.getDefaultInstance()) return this; - if (other.hasCreateTime()) { - setCreateTime(other.getCreateTime()); - } - if (other.hasLastAccessTime()) { - setLastAccessTime(other.getLastAccessTime()); - } - if (other.hasLocation()) { - bitField0_ |= 0x00000004; - location_ = other.location_; - onChanged(); - } - if (other.hasSdParameters()) { - mergeSdParameters(other.getSdParameters()); - } - if (other.hasSdHash()) { - setSdHash(other.getSdHash()); - } - if (other.hasParameters()) { - mergeParameters(other.getParameters()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSdHash()) { - - return false; - } - if (hasSdParameters()) { - if (!getSdParameters().isInitialized()) { - - return false; - } - } - if (hasParameters()) { - if (!getParameters().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional int64 create_time = 1; - private long createTime_ ; - /** - * optional int64 create_time = 1; - */ - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional int64 create_time = 1; - */ - public long getCreateTime() { - return createTime_; - } - /** - * optional int64 create_time = 1; - */ - public Builder setCreateTime(long value) { - bitField0_ |= 0x00000001; - createTime_ = value; - onChanged(); - return this; - } - /** - * optional int64 create_time = 1; - */ - public Builder clearCreateTime() { - bitField0_ = (bitField0_ & ~0x00000001); - createTime_ = 0L; - onChanged(); - return this; - } - - // optional int64 last_access_time = 2; - private long lastAccessTime_ ; - /** - * optional int64 last_access_time = 2; - */ - public boolean hasLastAccessTime() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional int64 last_access_time = 2; - */ - public long getLastAccessTime() { - return lastAccessTime_; - } - /** - * optional int64 last_access_time = 2; - */ - public Builder setLastAccessTime(long value) { - bitField0_ |= 0x00000002; - lastAccessTime_ = value; - onChanged(); - return this; - } - /** - * optional int64 last_access_time = 2; - */ - public Builder clearLastAccessTime() { - bitField0_ = (bitField0_ & ~0x00000002); - lastAccessTime_ = 0L; - onChanged(); - return 
this; - } - - // optional string location = 3; - private java.lang.Object location_ = ""; - /** - * optional string location = 3; - */ - public boolean hasLocation() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string location = 3; - */ - public java.lang.String getLocation() { - java.lang.Object ref = location_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - location_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string location = 3; - */ - public com.google.protobuf.ByteString - getLocationBytes() { - java.lang.Object ref = location_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - location_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string location = 3; - */ - public Builder setLocation( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - location_ = value; - onChanged(); - return this; - } - /** - * optional string location = 3; - */ - public Builder clearLocation() { - bitField0_ = (bitField0_ & ~0x00000004); - location_ = getDefaultInstance().getLocation(); - onChanged(); - return this; - } - /** - * optional string location = 3; - */ - public Builder setLocationBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - location_ = value; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> sdParametersBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-       * storage descriptor parameters
-       *
- */ - public boolean hasSdParameters() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-       * storage descriptor parameters
-       *
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { - if (sdParametersBuilder_ == null) { - return sdParameters_; - } else { - return sdParametersBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-       * <pre>
-       * storage descriptor parameters
-       * </pre>
- */ - public Builder setSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (sdParametersBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - sdParameters_ = value; - onChanged(); - } else { - sdParametersBuilder_.setMessage(value); - } - bitField0_ |= 0x00000008; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-       * <pre>
-       * storage descriptor parameters
-       * </pre>
- */ - public Builder setSdParameters( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { - if (sdParametersBuilder_ == null) { - sdParameters_ = builderForValue.build(); - onChanged(); - } else { - sdParametersBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000008; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-       * <pre>
-       * storage descriptor parameters
-       * </pre>
- */ - public Builder mergeSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (sdParametersBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008) && - sdParameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { - sdParameters_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(sdParameters_).mergeFrom(value).buildPartial(); - } else { - sdParameters_ = value; - } - onChanged(); - } else { - sdParametersBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000008; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-       * <pre>
-       * storage descriptor parameters
-       * </pre>
- */ - public Builder clearSdParameters() { - if (sdParametersBuilder_ == null) { - sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - onChanged(); - } else { - sdParametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000008); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-       * <pre>
-       * storage descriptor parameters
-       * </pre>
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getSdParametersBuilder() { - bitField0_ |= 0x00000008; - onChanged(); - return getSdParametersFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-       * <pre>
-       * storage descriptor parameters
-       * </pre>
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { - if (sdParametersBuilder_ != null) { - return sdParametersBuilder_.getMessageOrBuilder(); - } else { - return sdParameters_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; - * - *
-       * <pre>
-       * storage descriptor parameters
-       * </pre>
- */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> - getSdParametersFieldBuilder() { - if (sdParametersBuilder_ == null) { - sdParametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( - sdParameters_, - getParentForChildren(), - isClean()); - sdParameters_ = null; - } - return sdParametersBuilder_; - } - - // required bytes sd_hash = 5; - private com.google.protobuf.ByteString sdHash_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes sd_hash = 5; - */ - public boolean hasSdHash() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * required bytes sd_hash = 5; - */ - public com.google.protobuf.ByteString getSdHash() { - return sdHash_; - } - /** - * required bytes sd_hash = 5; - */ - public Builder setSdHash(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000010; - sdHash_ = value; - onChanged(); - return this; - } - /** - * required bytes sd_hash = 5; - */ - public Builder clearSdHash() { - bitField0_ = (bitField0_ & ~0x00000010); - sdHash_ = getDefaultInstance().getSdHash(); - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-       * <pre>
-       * partition parameters
-       * </pre>
- */ - public boolean hasParameters() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-       * <pre>
-       * partition parameters
-       * </pre>
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { - if (parametersBuilder_ == null) { - return parameters_; - } else { - return parametersBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-       * <pre>
-       * partition parameters
-       * </pre>
- */ - public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (parametersBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - parameters_ = value; - onChanged(); - } else { - parametersBuilder_.setMessage(value); - } - bitField0_ |= 0x00000020; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-       * <pre>
-       * partition parameters
-       * </pre>
- */ - public Builder setParameters( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { - if (parametersBuilder_ == null) { - parameters_ = builderForValue.build(); - onChanged(); - } else { - parametersBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000020; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-       * <pre>
-       * partition parameters
-       * </pre>
- */ - public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (parametersBuilder_ == null) { - if (((bitField0_ & 0x00000020) == 0x00000020) && - parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { - parameters_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); - } else { - parameters_ = value; - } - onChanged(); - } else { - parametersBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000020; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-       * <pre>
-       * partition parameters
-       * </pre>
- */ - public Builder clearParameters() { - if (parametersBuilder_ == null) { - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - onChanged(); - } else { - parametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000020); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-       * <pre>
-       * partition parameters
-       * </pre>
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { - bitField0_ |= 0x00000020; - onChanged(); - return getParametersFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-       * <pre>
-       * partition parameters
-       * </pre>
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { - if (parametersBuilder_ != null) { - return parametersBuilder_.getMessageOrBuilder(); - } else { - return parameters_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; - * - *
-       * <pre>
-       * partition parameters
-       * </pre>
- */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> - getParametersFieldBuilder() { - if (parametersBuilder_ == null) { - parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( - parameters_, - getParentForChildren(), - isClean()); - parameters_ = null; - } - return parametersBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Partition) - } - - static { - defaultInstance = new Partition(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Partition) - } - - public interface PrincipalPrivilegeSetEntryOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string principal_name = 1; - /** - * required string principal_name = 1; - */ - boolean hasPrincipalName(); - /** - * required string principal_name = 1; - */ - java.lang.String getPrincipalName(); - /** - * required string principal_name = 1; - */ - com.google.protobuf.ByteString - getPrincipalNameBytes(); - - // repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - java.util.List - getPrivilegesList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo getPrivileges(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - int getPrivilegesCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - java.util.List - getPrivilegesOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder getPrivilegesOrBuilder( - int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry} - */ - public static final class PrincipalPrivilegeSetEntry extends - com.google.protobuf.GeneratedMessage - implements PrincipalPrivilegeSetEntryOrBuilder { - // Use PrincipalPrivilegeSetEntry.newBuilder() to construct. 
- private PrincipalPrivilegeSetEntry(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private PrincipalPrivilegeSetEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final PrincipalPrivilegeSetEntry defaultInstance; - public static PrincipalPrivilegeSetEntry getDefaultInstance() { - return defaultInstance; - } - - public PrincipalPrivilegeSetEntry getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private PrincipalPrivilegeSetEntry( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - principalName_ = input.readBytes(); - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - privileges_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - privileges_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - privileges_ = java.util.Collections.unmodifiableList(privileges_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public PrincipalPrivilegeSetEntry parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new PrincipalPrivilegeSetEntry(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private 
int bitField0_; - // required string principal_name = 1; - public static final int PRINCIPAL_NAME_FIELD_NUMBER = 1; - private java.lang.Object principalName_; - /** - * required string principal_name = 1; - */ - public boolean hasPrincipalName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string principal_name = 1; - */ - public java.lang.String getPrincipalName() { - java.lang.Object ref = principalName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - principalName_ = s; - } - return s; - } - } - /** - * required string principal_name = 1; - */ - public com.google.protobuf.ByteString - getPrincipalNameBytes() { - java.lang.Object ref = principalName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - principalName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - public static final int PRIVILEGES_FIELD_NUMBER = 2; - private java.util.List privileges_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public java.util.List getPrivilegesList() { - return privileges_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public java.util.List - getPrivilegesOrBuilderList() { - return privileges_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public int getPrivilegesCount() { - return privileges_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo getPrivileges(int index) { - return privileges_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder getPrivilegesOrBuilder( - int index) { - return privileges_.get(index); - } - - private void initFields() { - principalName_ = ""; - privileges_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasPrincipalName()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getPrincipalNameBytes()); - } - for (int i = 0; i < privileges_.size(); i++) { - output.writeMessage(2, privileges_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getPrincipalNameBytes()); - } - for (int i = 0; i < privileges_.size(); i++) { - size += 
com.google.protobuf.CodedOutputStream - .computeMessageSize(2, privileges_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getPrivilegesFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - principalName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - if (privilegesBuilder_ == null) { - privileges_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - privilegesBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.principalName_ = principalName_; - if (privilegesBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - privileges_ = 
java.util.Collections.unmodifiableList(privileges_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.privileges_ = privileges_; - } else { - result.privileges_ = privilegesBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()) return this; - if (other.hasPrincipalName()) { - bitField0_ |= 0x00000001; - principalName_ = other.principalName_; - onChanged(); - } - if (privilegesBuilder_ == null) { - if (!other.privileges_.isEmpty()) { - if (privileges_.isEmpty()) { - privileges_ = other.privileges_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensurePrivilegesIsMutable(); - privileges_.addAll(other.privileges_); - } - onChanged(); - } - } else { - if (!other.privileges_.isEmpty()) { - if (privilegesBuilder_.isEmpty()) { - privilegesBuilder_.dispose(); - privilegesBuilder_ = null; - privileges_ = other.privileges_; - bitField0_ = (bitField0_ & ~0x00000002); - privilegesBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getPrivilegesFieldBuilder() : null; - } else { - privilegesBuilder_.addAllMessages(other.privileges_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasPrincipalName()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string principal_name = 1; - private java.lang.Object principalName_ = ""; - /** - * required string principal_name = 1; - */ - public boolean hasPrincipalName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string principal_name = 1; - */ - public java.lang.String getPrincipalName() { - java.lang.Object ref = principalName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - principalName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string principal_name = 1; - */ - public com.google.protobuf.ByteString - getPrincipalNameBytes() { - java.lang.Object ref = principalName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - principalName_ = b; - return b; - } else { - return 
(com.google.protobuf.ByteString) ref; - } - } - /** - * required string principal_name = 1; - */ - public Builder setPrincipalName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - principalName_ = value; - onChanged(); - return this; - } - /** - * required string principal_name = 1; - */ - public Builder clearPrincipalName() { - bitField0_ = (bitField0_ & ~0x00000001); - principalName_ = getDefaultInstance().getPrincipalName(); - onChanged(); - return this; - } - /** - * required string principal_name = 1; - */ - public Builder setPrincipalNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - principalName_ = value; - onChanged(); - return this; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - private java.util.List privileges_ = - java.util.Collections.emptyList(); - private void ensurePrivilegesIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - privileges_ = new java.util.ArrayList(privileges_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder> privilegesBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public java.util.List getPrivilegesList() { - if (privilegesBuilder_ == null) { - return java.util.Collections.unmodifiableList(privileges_); - } else { - return privilegesBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public int getPrivilegesCount() { - if (privilegesBuilder_ == null) { - return privileges_.size(); - } else { - return privilegesBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo getPrivileges(int index) { - if (privilegesBuilder_ == null) { - return privileges_.get(index); - } else { - return privilegesBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public Builder setPrivileges( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo value) { - if (privilegesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensurePrivilegesIsMutable(); - privileges_.set(index, value); - onChanged(); - } else { - privilegesBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public Builder setPrivileges( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder builderForValue) { - if (privilegesBuilder_ == null) { - ensurePrivilegesIsMutable(); - privileges_.set(index, builderForValue.build()); - onChanged(); - } else { - privilegesBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public Builder 
addPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo value) { - if (privilegesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensurePrivilegesIsMutable(); - privileges_.add(value); - onChanged(); - } else { - privilegesBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public Builder addPrivileges( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo value) { - if (privilegesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensurePrivilegesIsMutable(); - privileges_.add(index, value); - onChanged(); - } else { - privilegesBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public Builder addPrivileges( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder builderForValue) { - if (privilegesBuilder_ == null) { - ensurePrivilegesIsMutable(); - privileges_.add(builderForValue.build()); - onChanged(); - } else { - privilegesBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public Builder addPrivileges( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder builderForValue) { - if (privilegesBuilder_ == null) { - ensurePrivilegesIsMutable(); - privileges_.add(index, builderForValue.build()); - onChanged(); - } else { - privilegesBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public Builder addAllPrivileges( - java.lang.Iterable values) { - if (privilegesBuilder_ == null) { - ensurePrivilegesIsMutable(); - super.addAll(values, privileges_); - onChanged(); - } else { - privilegesBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public Builder clearPrivileges() { - if (privilegesBuilder_ == null) { - privileges_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - privilegesBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public Builder removePrivileges(int index) { - if (privilegesBuilder_ == null) { - ensurePrivilegesIsMutable(); - privileges_.remove(index); - onChanged(); - } else { - privilegesBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder getPrivilegesBuilder( - int index) { - return getPrivilegesFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder getPrivilegesOrBuilder( - int index) { - if (privilegesBuilder_ == null) { - return privileges_.get(index); } else { - return privilegesBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo 
privileges = 2; - */ - public java.util.List - getPrivilegesOrBuilderList() { - if (privilegesBuilder_ != null) { - return privilegesBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(privileges_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder addPrivilegesBuilder() { - return getPrivilegesFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder addPrivilegesBuilder( - int index) { - return getPrivilegesFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; - */ - public java.util.List - getPrivilegesBuilderList() { - return getPrivilegesFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder> - getPrivilegesFieldBuilder() { - if (privilegesBuilder_ == null) { - privilegesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder>( - privileges_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - privileges_ = null; - } - return privilegesBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry) - } - - static { - defaultInstance = new PrincipalPrivilegeSetEntry(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry) - } - - public interface PrincipalPrivilegeSetOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - java.util.List - getUsersList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getUsers(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - int getUsersCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - java.util.List - getUsersOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getUsersOrBuilder( - int index); - - // repeated 
.org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - java.util.List - getRolesList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getRoles(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - int getRolesCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - java.util.List - getRolesOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getRolesOrBuilder( - int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet} - */ - public static final class PrincipalPrivilegeSet extends - com.google.protobuf.GeneratedMessage - implements PrincipalPrivilegeSetOrBuilder { - // Use PrincipalPrivilegeSet.newBuilder() to construct. - private PrincipalPrivilegeSet(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private PrincipalPrivilegeSet(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final PrincipalPrivilegeSet defaultInstance; - public static PrincipalPrivilegeSet getDefaultInstance() { - return defaultInstance; - } - - public PrincipalPrivilegeSet getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private PrincipalPrivilegeSet( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - users_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - users_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.PARSER, extensionRegistry)); - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - roles_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - roles_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - users_ = 
java.util.Collections.unmodifiableList(users_); - } - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - roles_ = java.util.Collections.unmodifiableList(roles_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public PrincipalPrivilegeSet parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new PrincipalPrivilegeSet(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - public static final int USERS_FIELD_NUMBER = 1; - private java.util.List users_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public java.util.List getUsersList() { - return users_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public java.util.List - getUsersOrBuilderList() { - return users_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public int getUsersCount() { - return users_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getUsers(int index) { - return users_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getUsersOrBuilder( - int index) { - return users_.get(index); - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - public static final int ROLES_FIELD_NUMBER = 2; - private java.util.List roles_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public java.util.List getRolesList() { - return roles_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public java.util.List - getRolesOrBuilderList() { - return roles_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public int getRolesCount() { - return roles_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getRoles(int index) { - return roles_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getRolesOrBuilder( - int index) { - return roles_.get(index); - } - - private void initFields() { - users_ = java.util.Collections.emptyList(); - roles_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getUsersCount(); i++) { - if (!getUsers(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getRolesCount(); i++) { - if (!getRoles(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < users_.size(); i++) { - output.writeMessage(1, users_.get(i)); - } - for (int i = 0; i < roles_.size(); i++) { - output.writeMessage(2, roles_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < users_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, users_.get(i)); - } - for (int i = 0; i < roles_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, roles_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( - 
java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getUsersFieldBuilder(); - getRolesFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (usersBuilder_ == null) { - users_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & 
~0x00000001); - } else { - usersBuilder_.clear(); - } - if (rolesBuilder_ == null) { - roles_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - rolesBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet(this); - int from_bitField0_ = bitField0_; - if (usersBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - users_ = java.util.Collections.unmodifiableList(users_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.users_ = users_; - } else { - result.users_ = usersBuilder_.build(); - } - if (rolesBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - roles_ = java.util.Collections.unmodifiableList(roles_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.roles_ = roles_; - } else { - result.roles_ = rolesBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) return this; - if (usersBuilder_ == null) { - if (!other.users_.isEmpty()) { - if (users_.isEmpty()) { - users_ = other.users_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureUsersIsMutable(); - users_.addAll(other.users_); - } - onChanged(); - } - } else { - if (!other.users_.isEmpty()) { - if (usersBuilder_.isEmpty()) { - usersBuilder_.dispose(); - usersBuilder_ = null; - users_ = other.users_; - bitField0_ = (bitField0_ & ~0x00000001); - usersBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getUsersFieldBuilder() : null; - } else { - usersBuilder_.addAllMessages(other.users_); - } - } - } - if (rolesBuilder_ == null) { - if (!other.roles_.isEmpty()) { - if (roles_.isEmpty()) { - roles_ = other.roles_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureRolesIsMutable(); - roles_.addAll(other.roles_); - } - onChanged(); - } - } else { - if (!other.roles_.isEmpty()) { - if (rolesBuilder_.isEmpty()) { - rolesBuilder_.dispose(); - rolesBuilder_ = null; - roles_ = other.roles_; - bitField0_ = (bitField0_ & ~0x00000002); - rolesBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getRolesFieldBuilder() : null; - } else { - rolesBuilder_.addAllMessages(other.roles_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getUsersCount(); i++) { - if (!getUsers(i).isInitialized()) { - - return false; - } - } - for (int i = 0; i < getRolesCount(); i++) { - if (!getRoles(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - private java.util.List users_ = - java.util.Collections.emptyList(); - private void ensureUsersIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - users_ = new java.util.ArrayList(users_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> usersBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public java.util.List getUsersList() { - if (usersBuilder_ == null) { - return java.util.Collections.unmodifiableList(users_); - } else { - return usersBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public int getUsersCount() { - if (usersBuilder_ == null) { - return users_.size(); - } else { - return usersBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getUsers(int index) { - if (usersBuilder_ == null) { - return users_.get(index); - } else { - return usersBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public Builder setUsers( - int index, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { - if (usersBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureUsersIsMutable(); - users_.set(index, value); - onChanged(); - } else { - usersBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public Builder setUsers( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { - if (usersBuilder_ == null) { - ensureUsersIsMutable(); - users_.set(index, builderForValue.build()); - onChanged(); - } else { - usersBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public Builder addUsers(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { - if (usersBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureUsersIsMutable(); - users_.add(value); - onChanged(); - } else { - usersBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public Builder addUsers( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { - if (usersBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureUsersIsMutable(); - users_.add(index, value); - onChanged(); - } else { - usersBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public Builder addUsers( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { - if (usersBuilder_ == null) { - ensureUsersIsMutable(); - users_.add(builderForValue.build()); - onChanged(); - } else { - usersBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public Builder addUsers( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { - if (usersBuilder_ == null) { - ensureUsersIsMutable(); - users_.add(index, builderForValue.build()); - onChanged(); - } else { - usersBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public Builder addAllUsers( - java.lang.Iterable values) { - if (usersBuilder_ == null) { - ensureUsersIsMutable(); - super.addAll(values, users_); - onChanged(); - } else { - usersBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public Builder clearUsers() { - if (usersBuilder_ == null) { - users_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - usersBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public Builder removeUsers(int index) { - if (usersBuilder_ == null) { - ensureUsersIsMutable(); - users_.remove(index); - onChanged(); - } else { 
- usersBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder getUsersBuilder( - int index) { - return getUsersFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getUsersOrBuilder( - int index) { - if (usersBuilder_ == null) { - return users_.get(index); } else { - return usersBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public java.util.List - getUsersOrBuilderList() { - if (usersBuilder_ != null) { - return usersBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(users_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addUsersBuilder() { - return getUsersFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addUsersBuilder( - int index) { - return getUsersFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; - */ - public java.util.List - getUsersBuilderList() { - return getUsersFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> - getUsersFieldBuilder() { - if (usersBuilder_ == null) { - usersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder>( - users_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - users_ = null; - } - return usersBuilder_; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - private java.util.List roles_ = - java.util.Collections.emptyList(); - private void ensureRolesIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - roles_ = new java.util.ArrayList(roles_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> rolesBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public java.util.List getRolesList() { - if (rolesBuilder_ == null) { - return java.util.Collections.unmodifiableList(roles_); - } else { - return rolesBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public int getRolesCount() { - if (rolesBuilder_ == null) { - return roles_.size(); - } else { - return rolesBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getRoles(int index) { - if (rolesBuilder_ == null) { - return roles_.get(index); - } else { - return rolesBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public Builder setRoles( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { - if (rolesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRolesIsMutable(); - roles_.set(index, value); - onChanged(); - } else { - rolesBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public Builder setRoles( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { - if (rolesBuilder_ == null) { - ensureRolesIsMutable(); - roles_.set(index, builderForValue.build()); - onChanged(); - } else { - rolesBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public Builder addRoles(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { - if (rolesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRolesIsMutable(); - roles_.add(value); - onChanged(); - } else { - rolesBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public Builder addRoles( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { - if (rolesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRolesIsMutable(); - roles_.add(index, value); - onChanged(); - } else { - rolesBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public Builder addRoles( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { - if (rolesBuilder_ == null) { - ensureRolesIsMutable(); - roles_.add(builderForValue.build()); - onChanged(); - } else { - rolesBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public Builder addRoles( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { - if 
(rolesBuilder_ == null) { - ensureRolesIsMutable(); - roles_.add(index, builderForValue.build()); - onChanged(); - } else { - rolesBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public Builder addAllRoles( - java.lang.Iterable values) { - if (rolesBuilder_ == null) { - ensureRolesIsMutable(); - super.addAll(values, roles_); - onChanged(); - } else { - rolesBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public Builder clearRoles() { - if (rolesBuilder_ == null) { - roles_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - rolesBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public Builder removeRoles(int index) { - if (rolesBuilder_ == null) { - ensureRolesIsMutable(); - roles_.remove(index); - onChanged(); - } else { - rolesBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder getRolesBuilder( - int index) { - return getRolesFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getRolesOrBuilder( - int index) { - if (rolesBuilder_ == null) { - return roles_.get(index); } else { - return rolesBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public java.util.List - getRolesOrBuilderList() { - if (rolesBuilder_ != null) { - return rolesBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(roles_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addRolesBuilder() { - return getRolesFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addRolesBuilder( - int index) { - return getRolesFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; - */ - public java.util.List - getRolesBuilderList() { - return getRolesFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> - getRolesFieldBuilder() { - if (rolesBuilder_ == null) { - rolesBuilder_ = 
new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder>( - roles_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - roles_ = null; - } - return rolesBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet) - } - - static { - defaultInstance = new PrincipalPrivilegeSet(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet) - } - - public interface PrivilegeGrantInfoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional string privilege = 1; - /** - * optional string privilege = 1; - */ - boolean hasPrivilege(); - /** - * optional string privilege = 1; - */ - java.lang.String getPrivilege(); - /** - * optional string privilege = 1; - */ - com.google.protobuf.ByteString - getPrivilegeBytes(); - - // optional int64 create_time = 2; - /** - * optional int64 create_time = 2; - */ - boolean hasCreateTime(); - /** - * optional int64 create_time = 2; - */ - long getCreateTime(); - - // optional string grantor = 3; - /** - * optional string grantor = 3; - */ - boolean hasGrantor(); - /** - * optional string grantor = 3; - */ - java.lang.String getGrantor(); - /** - * optional string grantor = 3; - */ - com.google.protobuf.ByteString - getGrantorBytes(); - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; - */ - boolean hasGrantorType(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType(); - - // optional bool grant_option = 5; - /** - * optional bool grant_option = 5; - */ - boolean hasGrantOption(); - /** - * optional bool grant_option = 5; - */ - boolean getGrantOption(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo} - */ - public static final class PrivilegeGrantInfo extends - com.google.protobuf.GeneratedMessage - implements PrivilegeGrantInfoOrBuilder { - // Use PrivilegeGrantInfo.newBuilder() to construct. 
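The hunks above remove the generated PrincipalPrivilegeSet message together with its repeated-field Builder (addUsers/addRoles, the RepeatedFieldBuilder plumbing, and the static parseFrom entry points). A minimal sketch of how callers drove that now-deleted API, assuming protobuf-java 2.5-era generated code as the GeneratedMessage/RepeatedFieldBuilder usage indicates; the setPrincipalName setter on PrincipalPrivilegeSetEntry is an assumption, since that message's builder is generated elsewhere in this file and only its type is referenced in these hunks:

    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

    public class PrincipalPrivilegeSetSketch {
      public static void main(String[] args) throws Exception {
        // Entry for the repeated "users = 1" field; setPrincipalName is an assumed setter.
        HbaseMetastoreProto.PrincipalPrivilegeSetEntry user =
            HbaseMetastoreProto.PrincipalPrivilegeSetEntry.newBuilder()
                .setPrincipalName("hive")
                .build();

        HbaseMetastoreProto.PrincipalPrivilegeSet privSet =
            HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder()
                .addUsers(user)   // repeated users = 1; roles = 2 left empty
                .build();

        // Round-trip through the static parser that the hunk above deletes.
        byte[] serialized = privSet.toByteArray();
        HbaseMetastoreProto.PrincipalPrivilegeSet copy =
            HbaseMetastoreProto.PrincipalPrivilegeSet.parseFrom(serialized);
        System.out.println(copy.getUsersCount());   // 1
      }
    }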
- private PrivilegeGrantInfo(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private PrivilegeGrantInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final PrivilegeGrantInfo defaultInstance; - public static PrivilegeGrantInfo getDefaultInstance() { - return defaultInstance; - } - - public PrivilegeGrantInfo getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private PrivilegeGrantInfo( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - privilege_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - createTime_ = input.readInt64(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - grantor_ = input.readBytes(); - break; - } - case 32: { - int rawValue = input.readEnum(); - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(4, rawValue); - } else { - bitField0_ |= 0x00000008; - grantorType_ = value; - } - break; - } - case 40: { - bitField0_ |= 0x00000010; - grantOption_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public PrivilegeGrantInfo parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new PrivilegeGrantInfo(input, extensionRegistry); - } - }; - - @java.lang.Override 
- public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional string privilege = 1; - public static final int PRIVILEGE_FIELD_NUMBER = 1; - private java.lang.Object privilege_; - /** - * optional string privilege = 1; - */ - public boolean hasPrivilege() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional string privilege = 1; - */ - public java.lang.String getPrivilege() { - java.lang.Object ref = privilege_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - privilege_ = s; - } - return s; - } - } - /** - * optional string privilege = 1; - */ - public com.google.protobuf.ByteString - getPrivilegeBytes() { - java.lang.Object ref = privilege_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - privilege_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional int64 create_time = 2; - public static final int CREATE_TIME_FIELD_NUMBER = 2; - private long createTime_; - /** - * optional int64 create_time = 2; - */ - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional int64 create_time = 2; - */ - public long getCreateTime() { - return createTime_; - } - - // optional string grantor = 3; - public static final int GRANTOR_FIELD_NUMBER = 3; - private java.lang.Object grantor_; - /** - * optional string grantor = 3; - */ - public boolean hasGrantor() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string grantor = 3; - */ - public java.lang.String getGrantor() { - java.lang.Object ref = grantor_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - grantor_ = s; - } - return s; - } - } - /** - * optional string grantor = 3; - */ - public com.google.protobuf.ByteString - getGrantorBytes() { - java.lang.Object ref = grantor_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - grantor_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; - public static final int GRANTOR_TYPE_FIELD_NUMBER = 4; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType grantorType_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; - */ - public boolean hasGrantorType() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType() { - return grantorType_; - } - - // optional bool grant_option = 5; - public static final int GRANT_OPTION_FIELD_NUMBER = 5; - private boolean grantOption_; - /** - * optional bool grant_option = 5; - */ - public boolean hasGrantOption() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool grant_option = 5; - */ - public boolean 
getGrantOption() { - return grantOption_; - } - - private void initFields() { - privilege_ = ""; - createTime_ = 0L; - grantor_ = ""; - grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - grantOption_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getPrivilegeBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeInt64(2, createTime_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getGrantorBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeEnum(4, grantorType_.getNumber()); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBool(5, grantOption_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getPrivilegeBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(2, createTime_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getGrantorBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(4, grantorType_.getNumber()); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, grantOption_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom(java.io.InputStream input) - 
throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - privilege_ = ""; - bitField0_ 
= (bitField0_ & ~0x00000001); - createTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - grantor_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - bitField0_ = (bitField0_ & ~0x00000008); - grantOption_ = false; - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.privilege_ = privilege_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.createTime_ = createTime_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.grantor_ = grantor_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.grantorType_ = grantorType_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.grantOption_ = grantOption_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance()) return this; - if (other.hasPrivilege()) { - bitField0_ |= 0x00000001; - privilege_ = other.privilege_; - onChanged(); - } - if (other.hasCreateTime()) { - setCreateTime(other.getCreateTime()); - } - if (other.hasGrantor()) { - bitField0_ |= 0x00000004; - grantor_ = other.grantor_; - onChanged(); - } - if (other.hasGrantorType()) { - setGrantorType(other.getGrantorType()); - } - if (other.hasGrantOption()) { - setGrantOption(other.getGrantOption()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional string privilege = 1; - private java.lang.Object privilege_ = ""; - /** - * optional string privilege = 1; - */ - public boolean hasPrivilege() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional string privilege = 1; - */ - public java.lang.String getPrivilege() { - java.lang.Object ref = privilege_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - privilege_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string privilege = 1; - */ - public com.google.protobuf.ByteString - getPrivilegeBytes() { - java.lang.Object ref = privilege_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - privilege_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string privilege = 1; - */ - public Builder setPrivilege( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - privilege_ = value; - onChanged(); - return this; - } - /** - * optional string privilege = 1; - */ - public Builder clearPrivilege() { - bitField0_ = (bitField0_ & ~0x00000001); - privilege_ = getDefaultInstance().getPrivilege(); - onChanged(); - return this; - } - /** - * optional string privilege = 1; - */ - public Builder setPrivilegeBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - privilege_ = value; - onChanged(); - return this; - } - - // optional int64 create_time = 2; - private long createTime_ ; - /** - * optional int64 create_time = 2; - */ - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional int64 create_time = 2; - */ - public long getCreateTime() { - return createTime_; - } - /** - * optional int64 create_time = 2; - */ - public Builder setCreateTime(long value) { - bitField0_ |= 0x00000002; - createTime_ = value; - onChanged(); - return this; - } - /** - * optional int64 create_time = 2; - */ - public Builder clearCreateTime() { - bitField0_ = (bitField0_ & ~0x00000002); - createTime_ = 0L; - onChanged(); - return this; - } - - // optional string grantor = 3; - private java.lang.Object grantor_ = ""; - /** - * optional string grantor = 3; - */ - public boolean hasGrantor() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string grantor = 3; - */ - public java.lang.String getGrantor() { - java.lang.Object ref = grantor_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - grantor_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string grantor = 3; - */ - public 
com.google.protobuf.ByteString - getGrantorBytes() { - java.lang.Object ref = grantor_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - grantor_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string grantor = 3; - */ - public Builder setGrantor( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - grantor_ = value; - onChanged(); - return this; - } - /** - * optional string grantor = 3; - */ - public Builder clearGrantor() { - bitField0_ = (bitField0_ & ~0x00000004); - grantor_ = getDefaultInstance().getGrantor(); - onChanged(); - return this; - } - /** - * optional string grantor = 3; - */ - public Builder setGrantorBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - grantor_ = value; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; - */ - public boolean hasGrantorType() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType() { - return grantorType_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; - */ - public Builder setGrantorType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - grantorType_ = value; - onChanged(); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; - */ - public Builder clearGrantorType() { - bitField0_ = (bitField0_ & ~0x00000008); - grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - onChanged(); - return this; - } - - // optional bool grant_option = 5; - private boolean grantOption_ ; - /** - * optional bool grant_option = 5; - */ - public boolean hasGrantOption() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool grant_option = 5; - */ - public boolean getGrantOption() { - return grantOption_; - } - /** - * optional bool grant_option = 5; - */ - public Builder setGrantOption(boolean value) { - bitField0_ |= 0x00000010; - grantOption_ = value; - onChanged(); - return this; - } - /** - * optional bool grant_option = 5; - */ - public Builder clearGrantOption() { - bitField0_ = (bitField0_ & ~0x00000010); - grantOption_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo) - } - - static { - defaultInstance = new PrivilegeGrantInfo(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo) - } - - public interface RoleGrantInfoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string principal_name = 1; - /** - * required string 
principal_name = 1; - */ - boolean hasPrincipalName(); - /** - * required string principal_name = 1; - */ - java.lang.String getPrincipalName(); - /** - * required string principal_name = 1; - */ - com.google.protobuf.ByteString - getPrincipalNameBytes(); - - // required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; - /** - * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; - */ - boolean hasPrincipalType(); - /** - * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getPrincipalType(); - - // optional int64 add_time = 3; - /** - * optional int64 add_time = 3; - */ - boolean hasAddTime(); - /** - * optional int64 add_time = 3; - */ - long getAddTime(); - - // optional string grantor = 4; - /** - * optional string grantor = 4; - */ - boolean hasGrantor(); - /** - * optional string grantor = 4; - */ - java.lang.String getGrantor(); - /** - * optional string grantor = 4; - */ - com.google.protobuf.ByteString - getGrantorBytes(); - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; - */ - boolean hasGrantorType(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType(); - - // optional bool grant_option = 6; - /** - * optional bool grant_option = 6; - */ - boolean hasGrantOption(); - /** - * optional bool grant_option = 6; - */ - boolean getGrantOption(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo} - */ - public static final class RoleGrantInfo extends - com.google.protobuf.GeneratedMessage - implements RoleGrantInfoOrBuilder { - // Use RoleGrantInfo.newBuilder() to construct. 
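The PrivilegeGrantInfo builder removed above exposes plain scalar setters with optional-field semantics; per its initFields(), everything defaults to the empty string, 0L, PrincipalType.USER, or false. A minimal write/read sketch against that deleted API, with placeholder values:

    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

    public class PrivilegeGrantInfoSketch {
      public static void main(String[] args) {
        HbaseMetastoreProto.PrivilegeGrantInfo info =
            HbaseMetastoreProto.PrivilegeGrantInfo.newBuilder()
                .setPrivilege("SELECT")                                  // optional string privilege = 1
                .setCreateTime(System.currentTimeMillis() / 1000)        // optional int64 create_time = 2
                .setGrantor("admin")                                     // optional string grantor = 3
                .setGrantorType(HbaseMetastoreProto.PrincipalType.USER)  // optional enum grantor_type = 4
                .setGrantOption(true)                                    // optional bool grant_option = 5
                .build();

        // Every field is optional, so readers guard with the generated hasX() methods.
        if (info.hasGrantOption() && info.getGrantOption()) {
          System.out.println(info.getPrivilege() + " granted by " + info.getGrantor());
        }
      }
    }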
- private RoleGrantInfo(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private RoleGrantInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final RoleGrantInfo defaultInstance; - public static RoleGrantInfo getDefaultInstance() { - return defaultInstance; - } - - public RoleGrantInfo getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private RoleGrantInfo( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - principalName_ = input.readBytes(); - break; - } - case 16: { - int rawValue = input.readEnum(); - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(2, rawValue); - } else { - bitField0_ |= 0x00000002; - principalType_ = value; - } - break; - } - case 24: { - bitField0_ |= 0x00000004; - addTime_ = input.readInt64(); - break; - } - case 34: { - bitField0_ |= 0x00000008; - grantor_ = input.readBytes(); - break; - } - case 40: { - int rawValue = input.readEnum(); - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(5, rawValue); - } else { - bitField0_ |= 0x00000010; - grantorType_ = value; - } - break; - } - case 48: { - bitField0_ |= 0x00000020; - grantOption_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new 
com.google.protobuf.AbstractParser() { - public RoleGrantInfo parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new RoleGrantInfo(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string principal_name = 1; - public static final int PRINCIPAL_NAME_FIELD_NUMBER = 1; - private java.lang.Object principalName_; - /** - * required string principal_name = 1; - */ - public boolean hasPrincipalName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string principal_name = 1; - */ - public java.lang.String getPrincipalName() { - java.lang.Object ref = principalName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - principalName_ = s; - } - return s; - } - } - /** - * required string principal_name = 1; - */ - public com.google.protobuf.ByteString - getPrincipalNameBytes() { - java.lang.Object ref = principalName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - principalName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; - public static final int PRINCIPAL_TYPE_FIELD_NUMBER = 2; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType principalType_; - /** - * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; - */ - public boolean hasPrincipalType() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getPrincipalType() { - return principalType_; - } - - // optional int64 add_time = 3; - public static final int ADD_TIME_FIELD_NUMBER = 3; - private long addTime_; - /** - * optional int64 add_time = 3; - */ - public boolean hasAddTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional int64 add_time = 3; - */ - public long getAddTime() { - return addTime_; - } - - // optional string grantor = 4; - public static final int GRANTOR_FIELD_NUMBER = 4; - private java.lang.Object grantor_; - /** - * optional string grantor = 4; - */ - public boolean hasGrantor() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional string grantor = 4; - */ - public java.lang.String getGrantor() { - java.lang.Object ref = grantor_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - grantor_ = s; - } - return s; - } - } - /** - * optional string grantor = 4; - */ - public com.google.protobuf.ByteString - getGrantorBytes() { - java.lang.Object ref = grantor_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - grantor_ = b; - return b; - } else { - 
return (com.google.protobuf.ByteString) ref; - } - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; - public static final int GRANTOR_TYPE_FIELD_NUMBER = 5; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType grantorType_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; - */ - public boolean hasGrantorType() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType() { - return grantorType_; - } - - // optional bool grant_option = 6; - public static final int GRANT_OPTION_FIELD_NUMBER = 6; - private boolean grantOption_; - /** - * optional bool grant_option = 6; - */ - public boolean hasGrantOption() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional bool grant_option = 6; - */ - public boolean getGrantOption() { - return grantOption_; - } - - private void initFields() { - principalName_ = ""; - principalType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - addTime_ = 0L; - grantor_ = ""; - grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - grantOption_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasPrincipalName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasPrincipalType()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getPrincipalNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeEnum(2, principalType_.getNumber()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt64(3, addTime_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBytes(4, getGrantorBytes()); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeEnum(5, grantorType_.getNumber()); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeBool(6, grantOption_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getPrincipalNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(2, principalType_.getNumber()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(3, addTime_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(4, getGrantorBytes()); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(5, grantorType_.getNumber()); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - 
.computeBoolSize(6, grantOption_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo} - */ - public static 
final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - principalName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - principalType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - bitField0_ = (bitField0_ & ~0x00000002); - addTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - grantor_ = ""; - bitField0_ = (bitField0_ & ~0x00000008); - grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - bitField0_ = (bitField0_ & ~0x00000010); - grantOption_ = false; - bitField0_ = (bitField0_ & ~0x00000020); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.principalName_ = principalName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.principalType_ = principalType_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.addTime_ = addTime_; - if 
(((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.grantor_ = grantor_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.grantorType_ = grantorType_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.grantOption_ = grantOption_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.getDefaultInstance()) return this; - if (other.hasPrincipalName()) { - bitField0_ |= 0x00000001; - principalName_ = other.principalName_; - onChanged(); - } - if (other.hasPrincipalType()) { - setPrincipalType(other.getPrincipalType()); - } - if (other.hasAddTime()) { - setAddTime(other.getAddTime()); - } - if (other.hasGrantor()) { - bitField0_ |= 0x00000008; - grantor_ = other.grantor_; - onChanged(); - } - if (other.hasGrantorType()) { - setGrantorType(other.getGrantorType()); - } - if (other.hasGrantOption()) { - setGrantOption(other.getGrantOption()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasPrincipalName()) { - - return false; - } - if (!hasPrincipalType()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string principal_name = 1; - private java.lang.Object principalName_ = ""; - /** - * required string principal_name = 1; - */ - public boolean hasPrincipalName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string principal_name = 1; - */ - public java.lang.String getPrincipalName() { - java.lang.Object ref = principalName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - principalName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string principal_name = 1; - */ - public com.google.protobuf.ByteString - getPrincipalNameBytes() { - java.lang.Object ref = principalName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - principalName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string principal_name = 1; - */ - public Builder setPrincipalName( - java.lang.String value) { - if (value 
== null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - principalName_ = value; - onChanged(); - return this; - } - /** - * required string principal_name = 1; - */ - public Builder clearPrincipalName() { - bitField0_ = (bitField0_ & ~0x00000001); - principalName_ = getDefaultInstance().getPrincipalName(); - onChanged(); - return this; - } - /** - * required string principal_name = 1; - */ - public Builder setPrincipalNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - principalName_ = value; - onChanged(); - return this; - } - - // required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType principalType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - /** - * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; - */ - public boolean hasPrincipalType() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getPrincipalType() { - return principalType_; - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; - */ - public Builder setPrincipalType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - principalType_ = value; - onChanged(); - return this; - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; - */ - public Builder clearPrincipalType() { - bitField0_ = (bitField0_ & ~0x00000002); - principalType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - onChanged(); - return this; - } - - // optional int64 add_time = 3; - private long addTime_ ; - /** - * optional int64 add_time = 3; - */ - public boolean hasAddTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional int64 add_time = 3; - */ - public long getAddTime() { - return addTime_; - } - /** - * optional int64 add_time = 3; - */ - public Builder setAddTime(long value) { - bitField0_ |= 0x00000004; - addTime_ = value; - onChanged(); - return this; - } - /** - * optional int64 add_time = 3; - */ - public Builder clearAddTime() { - bitField0_ = (bitField0_ & ~0x00000004); - addTime_ = 0L; - onChanged(); - return this; - } - - // optional string grantor = 4; - private java.lang.Object grantor_ = ""; - /** - * optional string grantor = 4; - */ - public boolean hasGrantor() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional string grantor = 4; - */ - public java.lang.String getGrantor() { - java.lang.Object ref = grantor_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - grantor_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string grantor = 4; - */ - public com.google.protobuf.ByteString - getGrantorBytes() { - java.lang.Object ref = grantor_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - grantor_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; 
- } - } - /** - * optional string grantor = 4; - */ - public Builder setGrantor( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - grantor_ = value; - onChanged(); - return this; - } - /** - * optional string grantor = 4; - */ - public Builder clearGrantor() { - bitField0_ = (bitField0_ & ~0x00000008); - grantor_ = getDefaultInstance().getGrantor(); - onChanged(); - return this; - } - /** - * optional string grantor = 4; - */ - public Builder setGrantorBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - grantor_ = value; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; - */ - public boolean hasGrantorType() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType() { - return grantorType_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; - */ - public Builder setGrantorType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000010; - grantorType_ = value; - onChanged(); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; - */ - public Builder clearGrantorType() { - bitField0_ = (bitField0_ & ~0x00000010); - grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; - onChanged(); - return this; - } - - // optional bool grant_option = 6; - private boolean grantOption_ ; - /** - * optional bool grant_option = 6; - */ - public boolean hasGrantOption() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional bool grant_option = 6; - */ - public boolean getGrantOption() { - return grantOption_; - } - /** - * optional bool grant_option = 6; - */ - public Builder setGrantOption(boolean value) { - bitField0_ |= 0x00000020; - grantOption_ = value; - onChanged(); - return this; - } - /** - * optional bool grant_option = 6; - */ - public Builder clearGrantOption() { - bitField0_ = (bitField0_ & ~0x00000020); - grantOption_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo) - } - - static { - defaultInstance = new RoleGrantInfo(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo) - } - - public interface RoleGrantInfoListOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - java.util.List - getGrantInfoList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo 
getGrantInfo(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - int getGrantInfoCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - java.util.List - getGrantInfoOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder getGrantInfoOrBuilder( - int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleGrantInfoList} - */ - public static final class RoleGrantInfoList extends - com.google.protobuf.GeneratedMessage - implements RoleGrantInfoListOrBuilder { - // Use RoleGrantInfoList.newBuilder() to construct. - private RoleGrantInfoList(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private RoleGrantInfoList(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final RoleGrantInfoList defaultInstance; - public static RoleGrantInfoList getDefaultInstance() { - return defaultInstance; - } - - public RoleGrantInfoList getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private RoleGrantInfoList( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - grantInfo_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - grantInfo_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - grantInfo_ = java.util.Collections.unmodifiableList(grantInfo_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public RoleGrantInfoList parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new RoleGrantInfoList(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - public static final int GRANT_INFO_FIELD_NUMBER = 1; - private java.util.List grantInfo_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public java.util.List getGrantInfoList() { - return grantInfo_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public java.util.List - getGrantInfoOrBuilderList() { - return grantInfo_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public int getGrantInfoCount() { - return grantInfo_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo getGrantInfo(int index) { - return grantInfo_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder getGrantInfoOrBuilder( - int index) { - return grantInfo_.get(index); - } - - private void initFields() { - grantInfo_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getGrantInfoCount(); i++) { - if (!getGrantInfo(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < grantInfo_.size(); i++) { - output.writeMessage(1, grantInfo_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < grantInfo_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, grantInfo_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleGrantInfoList} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoListOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable - .ensureFieldAccessorsInitialized( - 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getGrantInfoFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (grantInfoBuilder_ == null) { - grantInfo_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - grantInfoBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList(this); - int from_bitField0_ = bitField0_; - if (grantInfoBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - grantInfo_ = java.util.Collections.unmodifiableList(grantInfo_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.grantInfo_ = grantInfo_; - } else { - result.grantInfo_ = grantInfoBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.getDefaultInstance()) return this; - if (grantInfoBuilder_ == null) { - if (!other.grantInfo_.isEmpty()) { - if (grantInfo_.isEmpty()) { - grantInfo_ = other.grantInfo_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureGrantInfoIsMutable(); - grantInfo_.addAll(other.grantInfo_); - } - onChanged(); - } - } else { - if (!other.grantInfo_.isEmpty()) { - if (grantInfoBuilder_.isEmpty()) { - grantInfoBuilder_.dispose(); - grantInfoBuilder_ = null; - grantInfo_ = other.grantInfo_; - bitField0_ = (bitField0_ & ~0x00000001); - 
grantInfoBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getGrantInfoFieldBuilder() : null; - } else { - grantInfoBuilder_.addAllMessages(other.grantInfo_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getGrantInfoCount(); i++) { - if (!getGrantInfo(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - private java.util.List grantInfo_ = - java.util.Collections.emptyList(); - private void ensureGrantInfoIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - grantInfo_ = new java.util.ArrayList(grantInfo_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder> grantInfoBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public java.util.List getGrantInfoList() { - if (grantInfoBuilder_ == null) { - return java.util.Collections.unmodifiableList(grantInfo_); - } else { - return grantInfoBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public int getGrantInfoCount() { - if (grantInfoBuilder_ == null) { - return grantInfo_.size(); - } else { - return grantInfoBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo getGrantInfo(int index) { - if (grantInfoBuilder_ == null) { - return grantInfo_.get(index); - } else { - return grantInfoBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public Builder setGrantInfo( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo value) { - if (grantInfoBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureGrantInfoIsMutable(); - grantInfo_.set(index, value); - onChanged(); - } else { - grantInfoBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public Builder setGrantInfo( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder builderForValue) { - if (grantInfoBuilder_ == null) { - ensureGrantInfoIsMutable(); - grantInfo_.set(index, builderForValue.build()); - onChanged(); - } else { - 
grantInfoBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public Builder addGrantInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo value) { - if (grantInfoBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureGrantInfoIsMutable(); - grantInfo_.add(value); - onChanged(); - } else { - grantInfoBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public Builder addGrantInfo( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo value) { - if (grantInfoBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureGrantInfoIsMutable(); - grantInfo_.add(index, value); - onChanged(); - } else { - grantInfoBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public Builder addGrantInfo( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder builderForValue) { - if (grantInfoBuilder_ == null) { - ensureGrantInfoIsMutable(); - grantInfo_.add(builderForValue.build()); - onChanged(); - } else { - grantInfoBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public Builder addGrantInfo( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder builderForValue) { - if (grantInfoBuilder_ == null) { - ensureGrantInfoIsMutable(); - grantInfo_.add(index, builderForValue.build()); - onChanged(); - } else { - grantInfoBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public Builder addAllGrantInfo( - java.lang.Iterable values) { - if (grantInfoBuilder_ == null) { - ensureGrantInfoIsMutable(); - super.addAll(values, grantInfo_); - onChanged(); - } else { - grantInfoBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public Builder clearGrantInfo() { - if (grantInfoBuilder_ == null) { - grantInfo_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - grantInfoBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public Builder removeGrantInfo(int index) { - if (grantInfoBuilder_ == null) { - ensureGrantInfoIsMutable(); - grantInfo_.remove(index); - onChanged(); - } else { - grantInfoBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder getGrantInfoBuilder( - int index) { - return getGrantInfoFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder getGrantInfoOrBuilder( - int index) { - if (grantInfoBuilder_ == null) { - return grantInfo_.get(index); } else { - return grantInfoBuilder_.getMessageOrBuilder(index); - 
} - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public java.util.List - getGrantInfoOrBuilderList() { - if (grantInfoBuilder_ != null) { - return grantInfoBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(grantInfo_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder addGrantInfoBuilder() { - return getGrantInfoFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder addGrantInfoBuilder( - int index) { - return getGrantInfoFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; - */ - public java.util.List - getGrantInfoBuilderList() { - return getGrantInfoFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder> - getGrantInfoFieldBuilder() { - if (grantInfoBuilder_ == null) { - grantInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder>( - grantInfo_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - grantInfo_ = null; - } - return grantInfoBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.RoleGrantInfoList) - } - - static { - defaultInstance = new RoleGrantInfoList(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.RoleGrantInfoList) - } - - public interface RoleListOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated string role = 1; - /** - * repeated string role = 1; - */ - java.util.List - getRoleList(); - /** - * repeated string role = 1; - */ - int getRoleCount(); - /** - * repeated string role = 1; - */ - java.lang.String getRole(int index); - /** - * repeated string role = 1; - */ - com.google.protobuf.ByteString - getRoleBytes(int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleList} - */ - public static final class RoleList extends - com.google.protobuf.GeneratedMessage - implements RoleListOrBuilder { - // Use RoleList.newBuilder() to construct. 
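For context, the RoleGrantInfo and RoleGrantInfoList messages removed above follow the standard protobuf 2.x generated pattern: a nested Builder with per-field setters, required-field validation in build(), and static parseFrom entry points. A minimal sketch of how a caller would have built and round-tripped these messages, using only the generated methods visible in this hunk (the literal values and the class/method name of the sketch itself are illustrative assumptions, and toByteArray() is the standard protobuf serialization helper rather than something shown in this diff):

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList;

    public class RoleGrantInfoSketch {
      public static void main(String[] args) throws InvalidProtocolBufferException {
        // principal_name and principal_type are required fields (1 and 2);
        // build() throws if either is left unset.
        RoleGrantInfo grant = RoleGrantInfo.newBuilder()
            .setPrincipalName("example_user")              // illustrative value
            .setPrincipalType(PrincipalType.USER)
            .setAddTime(System.currentTimeMillis() / 1000) // optional int64 add_time = 3
            .setGrantor("example_admin")                   // illustrative value
            .setGrantorType(PrincipalType.USER)
            .setGrantOption(false)
            .build();

        // RoleGrantInfoList wraps the repeated grant_info field.
        RoleGrantInfoList grants = RoleGrantInfoList.newBuilder()
            .addGrantInfo(grant)
            .build();

        // Serialize and parse back, as code storing the value in an HBase cell would;
        // parseFrom(byte[]) is one of the generated parse entry points shown above.
        byte[] serialized = grants.toByteArray();
        RoleGrantInfoList parsed = RoleGrantInfoList.parseFrom(serialized);
        System.out.println(parsed.getGrantInfoCount() + " grant(s), first principal: "
            + parsed.getGrantInfo(0).getPrincipalName());
      }
    }
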
- private RoleList(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private RoleList(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final RoleList defaultInstance; - public static RoleList getDefaultInstance() { - return defaultInstance; - } - - public RoleList getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private RoleList( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - role_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000001; - } - role_.add(input.readBytes()); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - role_ = new com.google.protobuf.UnmodifiableLazyStringList(role_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public RoleList parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new RoleList(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - // repeated string role = 1; - public static final int ROLE_FIELD_NUMBER = 1; - private com.google.protobuf.LazyStringList role_; - /** - * repeated string role = 1; - */ - public java.util.List - getRoleList() { - return role_; - } - /** - * repeated string role = 1; - */ - public int getRoleCount() { - return role_.size(); - } - /** - * repeated string role = 1; - */ - public java.lang.String getRole(int 
index) { - return role_.get(index); - } - /** - * repeated string role = 1; - */ - public com.google.protobuf.ByteString - getRoleBytes(int index) { - return role_.getByteString(index); - } - - private void initFields() { - role_ = com.google.protobuf.LazyStringArrayList.EMPTY; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < role_.size(); i++) { - output.writeBytes(1, role_.getByteString(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - { - int dataSize = 0; - for (int i = 0; i < role_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(role_.getByteString(i)); - } - size += dataSize; - size += 1 * getRoleList().size(); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleList} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleListOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - role_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList 
buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList(this); - int from_bitField0_ = bitField0_; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - role_ = new com.google.protobuf.UnmodifiableLazyStringList( - role_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.role_ = role_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.getDefaultInstance()) return this; - if (!other.role_.isEmpty()) { - if (role_.isEmpty()) { - role_ = other.role_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureRoleIsMutable(); - role_.addAll(other.role_); - } - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated string role = 1; - private com.google.protobuf.LazyStringList role_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureRoleIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - role_ = new com.google.protobuf.LazyStringArrayList(role_); - bitField0_ |= 0x00000001; - } - } - /** - * repeated string role = 1; - */ - public java.util.List - getRoleList() { - return java.util.Collections.unmodifiableList(role_); - } - /** - * repeated string role = 1; - */ - public int getRoleCount() { - return role_.size(); - } - /** - * repeated string role = 1; - */ - public java.lang.String getRole(int index) { - return role_.get(index); - } - /** - * repeated string role = 1; - */ - public com.google.protobuf.ByteString - getRoleBytes(int index) { - return role_.getByteString(index); - } - /** - * repeated string role = 1; - */ - public Builder setRole( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureRoleIsMutable(); - role_.set(index, value); - onChanged(); - return this; - } - /** - * repeated string role = 1; - */ - public Builder addRole( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureRoleIsMutable(); - role_.add(value); - onChanged(); - return this; - } - /** - * repeated string role = 1; - */ - public Builder addAllRole( - java.lang.Iterable values) { - ensureRoleIsMutable(); - super.addAll(values, role_); - onChanged(); - return this; - } - /** - * repeated string role = 1; - */ - public Builder 
clearRole() { - role_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; - } - /** - * repeated string role = 1; - */ - public Builder addRoleBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensureRoleIsMutable(); - role_.add(value); - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.RoleList) - } - - static { - defaultInstance = new RoleList(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.RoleList) - } - - public interface RoleOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional int64 create_time = 1; - /** - * optional int64 create_time = 1; - */ - boolean hasCreateTime(); - /** - * optional int64 create_time = 1; - */ - long getCreateTime(); - - // optional string owner_name = 2; - /** - * optional string owner_name = 2; - */ - boolean hasOwnerName(); - /** - * optional string owner_name = 2; - */ - java.lang.String getOwnerName(); - /** - * optional string owner_name = 2; - */ - com.google.protobuf.ByteString - getOwnerNameBytes(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Role} - */ - public static final class Role extends - com.google.protobuf.GeneratedMessage - implements RoleOrBuilder { - // Use Role.newBuilder() to construct. - private Role(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Role(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Role defaultInstance; - public static Role getDefaultInstance() { - return defaultInstance; - } - - public Role getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Role( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - createTime_ = input.readInt64(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - ownerName_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor; - } - - protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Role parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Role(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional int64 create_time = 1; - public static final int CREATE_TIME_FIELD_NUMBER = 1; - private long createTime_; - /** - * optional int64 create_time = 1; - */ - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional int64 create_time = 1; - */ - public long getCreateTime() { - return createTime_; - } - - // optional string owner_name = 2; - public static final int OWNER_NAME_FIELD_NUMBER = 2; - private java.lang.Object ownerName_; - /** - * optional string owner_name = 2; - */ - public boolean hasOwnerName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string owner_name = 2; - */ - public java.lang.String getOwnerName() { - java.lang.Object ref = ownerName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - ownerName_ = s; - } - return s; - } - } - /** - * optional string owner_name = 2; - */ - public com.google.protobuf.ByteString - getOwnerNameBytes() { - java.lang.Object ref = ownerName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - ownerName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - createTime_ = 0L; - ownerName_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, createTime_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getOwnerNameBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, createTime_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getOwnerNameBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - 
return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Role} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - 
getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - createTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - ownerName_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.createTime_ = createTime_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.ownerName_ = ownerName_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.getDefaultInstance()) return this; - if (other.hasCreateTime()) { - setCreateTime(other.getCreateTime()); - } - if (other.hasOwnerName()) { - bitField0_ |= 0x00000002; - ownerName_ = other.ownerName_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } 
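For reviewers of this removal, a minimal stand-alone sketch of how the generated Role API deleted in this hunk was used. This is a hypothetical example, not part of the patch: the class name RoleRoundTrip and the sample values are invented, and it assumes the pre-removal hive-metastore jar (with HbaseMetastoreProto) and protobuf-java on the classpath. Every call shown appears in the hunk above except toByteArray(), which protobuf-java messages inherit from MessageLite.

import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;
import com.google.protobuf.InvalidProtocolBufferException;

public class RoleRoundTrip {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    // Fields per the deleted schema (see the javadoc comments in the hunk):
    //   optional int64 create_time = 1;
    //   optional string owner_name = 2;
    HbaseMetastoreProto.Role role = HbaseMetastoreProto.Role.newBuilder()
        .setCreateTime(System.currentTimeMillis() / 1000L) // sample value; units are an assumption
        .setOwnerName("hive")                              // sample owner, hypothetical
        .build();

    // Round-trip through the generated wire-format code
    // (writeTo()/getSerializedSize()/parseFrom() in the hunk above).
    byte[] bytes = role.toByteArray();
    HbaseMetastoreProto.Role copy = HbaseMetastoreProto.Role.parseFrom(bytes);
    System.out.println(copy.getOwnerName() + " @ " + copy.getCreateTime());
  }
}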
- - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional int64 create_time = 1; - private long createTime_ ; - /** - * optional int64 create_time = 1; - */ - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional int64 create_time = 1; - */ - public long getCreateTime() { - return createTime_; - } - /** - * optional int64 create_time = 1; - */ - public Builder setCreateTime(long value) { - bitField0_ |= 0x00000001; - createTime_ = value; - onChanged(); - return this; - } - /** - * optional int64 create_time = 1; - */ - public Builder clearCreateTime() { - bitField0_ = (bitField0_ & ~0x00000001); - createTime_ = 0L; - onChanged(); - return this; - } - - // optional string owner_name = 2; - private java.lang.Object ownerName_ = ""; - /** - * optional string owner_name = 2; - */ - public boolean hasOwnerName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string owner_name = 2; - */ - public java.lang.String getOwnerName() { - java.lang.Object ref = ownerName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - ownerName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string owner_name = 2; - */ - public com.google.protobuf.ByteString - getOwnerNameBytes() { - java.lang.Object ref = ownerName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - ownerName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string owner_name = 2; - */ - public Builder setOwnerName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - ownerName_ = value; - onChanged(); - return this; - } - /** - * optional string owner_name = 2; - */ - public Builder clearOwnerName() { - bitField0_ = (bitField0_ & ~0x00000002); - ownerName_ = getDefaultInstance().getOwnerName(); - onChanged(); - return this; - } - /** - * optional string owner_name = 2; - */ - public Builder setOwnerNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - ownerName_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Role) - } - - static { - defaultInstance = new Role(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Role) - } - - public interface StorageDescriptorOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - java.util.List - 
getColsList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getCols(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - int getColsCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - java.util.List - getColsOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getColsOrBuilder( - int index); - - // optional string input_format = 2; - /** - * optional string input_format = 2; - */ - boolean hasInputFormat(); - /** - * optional string input_format = 2; - */ - java.lang.String getInputFormat(); - /** - * optional string input_format = 2; - */ - com.google.protobuf.ByteString - getInputFormatBytes(); - - // optional string output_format = 3; - /** - * optional string output_format = 3; - */ - boolean hasOutputFormat(); - /** - * optional string output_format = 3; - */ - java.lang.String getOutputFormat(); - /** - * optional string output_format = 3; - */ - com.google.protobuf.ByteString - getOutputFormatBytes(); - - // optional bool is_compressed = 4; - /** - * optional bool is_compressed = 4; - */ - boolean hasIsCompressed(); - /** - * optional bool is_compressed = 4; - */ - boolean getIsCompressed(); - - // optional sint32 num_buckets = 5; - /** - * optional sint32 num_buckets = 5; - */ - boolean hasNumBuckets(); - /** - * optional sint32 num_buckets = 5; - */ - int getNumBuckets(); - - // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - boolean hasSerdeInfo(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo getSerdeInfo(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder getSerdeInfoOrBuilder(); - - // repeated string bucket_cols = 7; - /** - * repeated string bucket_cols = 7; - */ - java.util.List - getBucketColsList(); - /** - * repeated string bucket_cols = 7; - */ - int getBucketColsCount(); - /** - * repeated string bucket_cols = 7; - */ - java.lang.String getBucketCols(int index); - /** - * repeated string bucket_cols = 7; - */ - com.google.protobuf.ByteString - getBucketColsBytes(int index); - - // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - java.util.List - getSortColsList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order getSortCols(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - int getSortColsCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - java.util.List - getSortColsOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; 
- */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder getSortColsOrBuilder( - int index); - - // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - boolean hasSkewedInfo(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo getSkewedInfo(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder getSkewedInfoOrBuilder(); - - // optional bool stored_as_sub_directories = 10; - /** - * optional bool stored_as_sub_directories = 10; - */ - boolean hasStoredAsSubDirectories(); - /** - * optional bool stored_as_sub_directories = 10; - */ - boolean getStoredAsSubDirectories(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor} - */ - public static final class StorageDescriptor extends - com.google.protobuf.GeneratedMessage - implements StorageDescriptorOrBuilder { - // Use StorageDescriptor.newBuilder() to construct. - private StorageDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private StorageDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final StorageDescriptor defaultInstance; - public static StorageDescriptor getDefaultInstance() { - return defaultInstance; - } - - public StorageDescriptor getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private StorageDescriptor( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - cols_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - cols_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.PARSER, extensionRegistry)); - break; - } - case 18: { - bitField0_ |= 0x00000001; - inputFormat_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000002; - outputFormat_ = input.readBytes(); - break; - } - case 32: { - bitField0_ |= 0x00000004; - isCompressed_ = input.readBool(); - break; - } - case 40: { - bitField0_ |= 0x00000008; - numBuckets_ = input.readSInt32(); - break; - } - case 50: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder subBuilder = null; - if (((bitField0_ & 0x00000010) == 0x00000010)) { - subBuilder = 
serdeInfo_.toBuilder(); - } - serdeInfo_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(serdeInfo_); - serdeInfo_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000010; - break; - } - case 58: { - if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { - bucketCols_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000040; - } - bucketCols_.add(input.readBytes()); - break; - } - case 66: { - if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { - sortCols_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000080; - } - sortCols_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.PARSER, extensionRegistry)); - break; - } - case 74: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder subBuilder = null; - if (((bitField0_ & 0x00000020) == 0x00000020)) { - subBuilder = skewedInfo_.toBuilder(); - } - skewedInfo_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(skewedInfo_); - skewedInfo_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000020; - break; - } - case 80: { - bitField0_ |= 0x00000040; - storedAsSubDirectories_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - cols_ = java.util.Collections.unmodifiableList(cols_); - } - if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { - bucketCols_ = new com.google.protobuf.UnmodifiableLazyStringList(bucketCols_); - } - if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { - sortCols_ = java.util.Collections.unmodifiableList(sortCols_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public StorageDescriptor parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new StorageDescriptor(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface OrderOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required 
string column_name = 1; - /** - * required string column_name = 1; - */ - boolean hasColumnName(); - /** - * required string column_name = 1; - */ - java.lang.String getColumnName(); - /** - * required string column_name = 1; - */ - com.google.protobuf.ByteString - getColumnNameBytes(); - - // optional sint32 order = 2 [default = 1]; - /** - * optional sint32 order = 2 [default = 1]; - */ - boolean hasOrder(); - /** - * optional sint32 order = 2 [default = 1]; - */ - int getOrder(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order} - */ - public static final class Order extends - com.google.protobuf.GeneratedMessage - implements OrderOrBuilder { - // Use Order.newBuilder() to construct. - private Order(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Order(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Order defaultInstance; - public static Order getDefaultInstance() { - return defaultInstance; - } - - public Order getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Order( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - columnName_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - order_ = input.readSInt32(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Order parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return new Order(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string column_name = 1; - public static final int COLUMN_NAME_FIELD_NUMBER = 1; - private java.lang.Object columnName_; - /** - * required string column_name = 1; - */ - public boolean hasColumnName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string column_name = 1; - */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - columnName_ = s; - } - return s; - } - } - /** - * required string column_name = 1; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional sint32 order = 2 [default = 1]; - public static final int ORDER_FIELD_NUMBER = 2; - private int order_; - /** - * optional sint32 order = 2 [default = 1]; - */ - public boolean hasOrder() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional sint32 order = 2 [default = 1]; - */ - public int getOrder() { - return order_; - } - - private void initFields() { - columnName_ = ""; - order_ = 1; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasColumnName()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getColumnNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeSInt32(2, order_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getColumnNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeSInt32Size(2, order_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( - com.google.protobuf.ByteString 
data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - columnName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - order_ = 1; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.columnName_ = columnName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.order_ = order_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.getDefaultInstance()) return this; - if (other.hasColumnName()) { - bitField0_ |= 0x00000001; - columnName_ = other.columnName_; - onChanged(); - } - if (other.hasOrder()) { - setOrder(other.getOrder()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if 
(!hasColumnName()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string column_name = 1; - private java.lang.Object columnName_ = ""; - /** - * required string column_name = 1; - */ - public boolean hasColumnName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string column_name = 1; - */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - columnName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string column_name = 1; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string column_name = 1; - */ - public Builder setColumnName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - columnName_ = value; - onChanged(); - return this; - } - /** - * required string column_name = 1; - */ - public Builder clearColumnName() { - bitField0_ = (bitField0_ & ~0x00000001); - columnName_ = getDefaultInstance().getColumnName(); - onChanged(); - return this; - } - /** - * required string column_name = 1; - */ - public Builder setColumnNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - columnName_ = value; - onChanged(); - return this; - } - - // optional sint32 order = 2 [default = 1]; - private int order_ = 1; - /** - * optional sint32 order = 2 [default = 1]; - */ - public boolean hasOrder() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional sint32 order = 2 [default = 1]; - */ - public int getOrder() { - return order_; - } - /** - * optional sint32 order = 2 [default = 1]; - */ - public Builder setOrder(int value) { - bitField0_ |= 0x00000002; - order_ = value; - onChanged(); - return this; - } - /** - * optional sint32 order = 2 [default = 1]; - */ - public Builder clearOrder() { - bitField0_ = (bitField0_ & ~0x00000002); - order_ = 1; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order) - } - - static { - defaultInstance = new Order(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order) - } - - public interface SerDeInfoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional string name = 1; 
- /** - * optional string name = 1; - */ - boolean hasName(); - /** - * optional string name = 1; - */ - java.lang.String getName(); - /** - * optional string name = 1; - */ - com.google.protobuf.ByteString - getNameBytes(); - - // optional string serialization_lib = 2; - /** - * optional string serialization_lib = 2; - */ - boolean hasSerializationLib(); - /** - * optional string serialization_lib = 2; - */ - java.lang.String getSerializationLib(); - /** - * optional string serialization_lib = 2; - */ - com.google.protobuf.ByteString - getSerializationLibBytes(); - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - boolean hasParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo} - */ - public static final class SerDeInfo extends - com.google.protobuf.GeneratedMessage - implements SerDeInfoOrBuilder { - // Use SerDeInfo.newBuilder() to construct. - private SerDeInfo(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private SerDeInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final SerDeInfo defaultInstance; - public static SerDeInfo getDefaultInstance() { - return defaultInstance; - } - - public SerDeInfo getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private SerDeInfo( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - name_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - serializationLib_ = input.readBytes(); - break; - } - case 26: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; - if (((bitField0_ & 0x00000004) == 0x00000004)) { - subBuilder = parameters_.toBuilder(); - } - parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(parameters_); - parameters_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000004; - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new 
com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SerDeInfo parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SerDeInfo(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional string name = 1; - public static final int NAME_FIELD_NUMBER = 1; - private java.lang.Object name_; - /** - * optional string name = 1; - */ - public boolean hasName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional string name = 1; - */ - public java.lang.String getName() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - name_ = s; - } - return s; - } - } - /** - * optional string name = 1; - */ - public com.google.protobuf.ByteString - getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string serialization_lib = 2; - public static final int SERIALIZATION_LIB_FIELD_NUMBER = 2; - private java.lang.Object serializationLib_; - /** - * optional string serialization_lib = 2; - */ - public boolean hasSerializationLib() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string serialization_lib = 2; - */ - public java.lang.String getSerializationLib() { - java.lang.Object ref = serializationLib_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - serializationLib_ = s; - } - return s; - } - } - /** - * optional string serialization_lib = 2; - */ - public com.google.protobuf.ByteString - getSerializationLibBytes() { - java.lang.Object ref = serializationLib_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - serializationLib_ = b; - return b; - } else { 
- return (com.google.protobuf.ByteString) ref; - } - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - public static final int PARAMETERS_FIELD_NUMBER = 3; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public boolean hasParameters() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { - return parameters_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { - return parameters_; - } - - private void initFields() { - name_ = ""; - serializationLib_ = ""; - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (hasParameters()) { - if (!getParameters().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getSerializationLibBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeMessage(3, parameters_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getSerializationLibBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, parameters_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom(byte[] data) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getParametersFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - name_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - serializationLib_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - if (parametersBuilder_ == null) { - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - } else { - parametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.name_ = name_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.serializationLib_ = serializationLib_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - if (parametersBuilder_ == null) { - result.parameters_ = parameters_; - } else { - result.parameters_ = parametersBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance()) return this; - if (other.hasName()) { 
- bitField0_ |= 0x00000001; - name_ = other.name_; - onChanged(); - } - if (other.hasSerializationLib()) { - bitField0_ |= 0x00000002; - serializationLib_ = other.serializationLib_; - onChanged(); - } - if (other.hasParameters()) { - mergeParameters(other.getParameters()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (hasParameters()) { - if (!getParameters().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional string name = 1; - private java.lang.Object name_ = ""; - /** - * optional string name = 1; - */ - public boolean hasName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional string name = 1; - */ - public java.lang.String getName() { - java.lang.Object ref = name_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - name_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string name = 1; - */ - public com.google.protobuf.ByteString - getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string name = 1; - */ - public Builder setName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - name_ = value; - onChanged(); - return this; - } - /** - * optional string name = 1; - */ - public Builder clearName() { - bitField0_ = (bitField0_ & ~0x00000001); - name_ = getDefaultInstance().getName(); - onChanged(); - return this; - } - /** - * optional string name = 1; - */ - public Builder setNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - name_ = value; - onChanged(); - return this; - } - - // optional string serialization_lib = 2; - private java.lang.Object serializationLib_ = ""; - /** - * optional string serialization_lib = 2; - */ - public boolean hasSerializationLib() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string serialization_lib = 2; - */ - public java.lang.String getSerializationLib() { - java.lang.Object ref = serializationLib_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - serializationLib_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string serialization_lib = 2; - */ - public com.google.protobuf.ByteString - getSerializationLibBytes() { - java.lang.Object ref = serializationLib_; - if (ref 
instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - serializationLib_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string serialization_lib = 2; - */ - public Builder setSerializationLib( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - serializationLib_ = value; - onChanged(); - return this; - } - /** - * optional string serialization_lib = 2; - */ - public Builder clearSerializationLib() { - bitField0_ = (bitField0_ & ~0x00000002); - serializationLib_ = getDefaultInstance().getSerializationLib(); - onChanged(); - return this; - } - /** - * optional string serialization_lib = 2; - */ - public Builder setSerializationLibBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - serializationLib_ = value; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public boolean hasParameters() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { - if (parametersBuilder_ == null) { - return parameters_; - } else { - return parametersBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (parametersBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - parameters_ = value; - onChanged(); - } else { - parametersBuilder_.setMessage(value); - } - bitField0_ |= 0x00000004; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public Builder setParameters( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { - if (parametersBuilder_ == null) { - parameters_ = builderForValue.build(); - onChanged(); - } else { - parametersBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000004; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (parametersBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004) && - parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { - parameters_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); - } else { - parameters_ = 
value; - } - onChanged(); - } else { - parametersBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000004; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public Builder clearParameters() { - if (parametersBuilder_ == null) { - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - onChanged(); - } else { - parametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { - bitField0_ |= 0x00000004; - onChanged(); - return getParametersFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { - if (parametersBuilder_ != null) { - return parametersBuilder_.getMessageOrBuilder(); - } else { - return parameters_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> - getParametersFieldBuilder() { - if (parametersBuilder_ == null) { - parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( - parameters_, - getParentForChildren(), - isClean()); - parameters_ = null; - } - return parametersBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo) - } - - static { - defaultInstance = new SerDeInfo(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo) - } - - public interface SkewedInfoOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated string skewed_col_names = 1; - /** - * repeated string skewed_col_names = 1; - */ - java.util.List - getSkewedColNamesList(); - /** - * repeated string skewed_col_names = 1; - */ - int getSkewedColNamesCount(); - /** - * repeated string skewed_col_names = 1; - */ - java.lang.String getSkewedColNames(int index); - /** - * repeated string skewed_col_names = 1; - */ - com.google.protobuf.ByteString - getSkewedColNamesBytes(int index); - - // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - java.util.List - getSkewedColValuesList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList getSkewedColValues(int index); - /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - int getSkewedColValuesCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - java.util.List - getSkewedColValuesOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder getSkewedColValuesOrBuilder( - int index); - - // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - java.util.List - getSkewedColValueLocationMapsList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap getSkewedColValueLocationMaps(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - int getSkewedColValueLocationMapsCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - java.util.List - getSkewedColValueLocationMapsOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder getSkewedColValueLocationMapsOrBuilder( - int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo} - */ - public static final class SkewedInfo extends - com.google.protobuf.GeneratedMessage - implements SkewedInfoOrBuilder { - // Use SkewedInfo.newBuilder() to construct. 
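// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only; this is NOT part of the deleted
// generated file above. It shows how the StorageDescriptor.SerDeInfo builder
// removed in this hunk would typically be exercised, assuming the standard
// protobuf 2.x generated accessors visible in the deletion (newBuilder,
// setName, setSerializationLib, build, parseFrom, getSerializationLib).
// The literal values ("lazySerDe", the SerDe class name) are hypothetical.
private static void serDeInfoRoundTripSketch()
    throws com.google.protobuf.InvalidProtocolBufferException {
  org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo serde =
      org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo
          .newBuilder()
          .setName("lazySerDe")
          .setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")
          .build();
  // The HBase-backed metastore persisted such messages as serialized bytes in a cell.
  byte[] bytes = serde.toByteArray();
  org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo copy =
      org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo
          .parseFrom(bytes);
  assert copy.getSerializationLib().equals(serde.getSerializationLib());
}
// ---------------------------------------------------------------------------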
- private SkewedInfo(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private SkewedInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final SkewedInfo defaultInstance; - public static SkewedInfo getDefaultInstance() { - return defaultInstance; - } - - public SkewedInfo getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private SkewedInfo( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - skewedColNames_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000001; - } - skewedColNames_.add(input.readBytes()); - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - skewedColValues_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - skewedColValues_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.PARSER, extensionRegistry)); - break; - } - case 26: { - if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - skewedColValueLocationMaps_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000004; - } - skewedColValueLocationMaps_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - skewedColNames_ = new com.google.protobuf.UnmodifiableLazyStringList(skewedColNames_); - } - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - skewedColValues_ = java.util.Collections.unmodifiableList(skewedColValues_); - } - if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - skewedColValueLocationMaps_ = java.util.Collections.unmodifiableList(skewedColValueLocationMaps_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SkewedInfo parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SkewedInfo(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface SkewedColValueListOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated string skewed_col_value = 1; - /** - * repeated string skewed_col_value = 1; - */ - java.util.List - getSkewedColValueList(); - /** - * repeated string skewed_col_value = 1; - */ - int getSkewedColValueCount(); - /** - * repeated string skewed_col_value = 1; - */ - java.lang.String getSkewedColValue(int index); - /** - * repeated string skewed_col_value = 1; - */ - com.google.protobuf.ByteString - getSkewedColValueBytes(int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList} - */ - public static final class SkewedColValueList extends - com.google.protobuf.GeneratedMessage - implements SkewedColValueListOrBuilder { - // Use SkewedColValueList.newBuilder() to construct. - private SkewedColValueList(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private SkewedColValueList(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final SkewedColValueList defaultInstance; - public static SkewedColValueList getDefaultInstance() { - return defaultInstance; - } - - public SkewedColValueList getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private SkewedColValueList( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - skewedColValue_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000001; - } - skewedColValue_.add(input.readBytes()); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - 
e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - skewedColValue_ = new com.google.protobuf.UnmodifiableLazyStringList(skewedColValue_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SkewedColValueList parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SkewedColValueList(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - // repeated string skewed_col_value = 1; - public static final int SKEWED_COL_VALUE_FIELD_NUMBER = 1; - private com.google.protobuf.LazyStringList skewedColValue_; - /** - * repeated string skewed_col_value = 1; - */ - public java.util.List - getSkewedColValueList() { - return skewedColValue_; - } - /** - * repeated string skewed_col_value = 1; - */ - public int getSkewedColValueCount() { - return skewedColValue_.size(); - } - /** - * repeated string skewed_col_value = 1; - */ - public java.lang.String getSkewedColValue(int index) { - return skewedColValue_.get(index); - } - /** - * repeated string skewed_col_value = 1; - */ - public com.google.protobuf.ByteString - getSkewedColValueBytes(int index) { - return skewedColValue_.getByteString(index); - } - - private void initFields() { - skewedColValue_ = com.google.protobuf.LazyStringArrayList.EMPTY; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < skewedColValue_.size(); i++) { - output.writeBytes(1, skewedColValue_.getByteString(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - { - int dataSize = 0; - for (int i = 0; i < skewedColValue_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(skewedColValue_.getByteString(i)); - } - size += dataSize; - size += 1 * getSkewedColValueList().size(); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private 
static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - skewedColValue_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList(this); - int from_bitField0_ = bitField0_; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - skewedColValue_ = new 
com.google.protobuf.UnmodifiableLazyStringList( - skewedColValue_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.skewedColValue_ = skewedColValue_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.getDefaultInstance()) return this; - if (!other.skewedColValue_.isEmpty()) { - if (skewedColValue_.isEmpty()) { - skewedColValue_ = other.skewedColValue_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureSkewedColValueIsMutable(); - skewedColValue_.addAll(other.skewedColValue_); - } - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated string skewed_col_value = 1; - private com.google.protobuf.LazyStringList skewedColValue_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureSkewedColValueIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - skewedColValue_ = new com.google.protobuf.LazyStringArrayList(skewedColValue_); - bitField0_ |= 0x00000001; - } - } - /** - * repeated string skewed_col_value = 1; - */ - public java.util.List - getSkewedColValueList() { - return java.util.Collections.unmodifiableList(skewedColValue_); - } - /** - * repeated string skewed_col_value = 1; - */ - public int getSkewedColValueCount() { - return skewedColValue_.size(); - } - /** - * repeated string skewed_col_value = 1; - */ - public java.lang.String getSkewedColValue(int index) { - return skewedColValue_.get(index); - } - /** - * repeated string skewed_col_value = 1; - */ - public com.google.protobuf.ByteString - getSkewedColValueBytes(int index) { - return skewedColValue_.getByteString(index); - } - /** - * repeated string skewed_col_value = 1; - */ - public Builder setSkewedColValue( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureSkewedColValueIsMutable(); - skewedColValue_.set(index, value); - onChanged(); - return this; - } - /** - * repeated string skewed_col_value = 1; - */ - public Builder addSkewedColValue( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - 
ensureSkewedColValueIsMutable(); - skewedColValue_.add(value); - onChanged(); - return this; - } - /** - * repeated string skewed_col_value = 1; - */ - public Builder addAllSkewedColValue( - java.lang.Iterable values) { - ensureSkewedColValueIsMutable(); - super.addAll(values, skewedColValue_); - onChanged(); - return this; - } - /** - * repeated string skewed_col_value = 1; - */ - public Builder clearSkewedColValue() { - skewedColValue_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; - } - /** - * repeated string skewed_col_value = 1; - */ - public Builder addSkewedColValueBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensureSkewedColValueIsMutable(); - skewedColValue_.add(value); - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList) - } - - static { - defaultInstance = new SkewedColValueList(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList) - } - - public interface SkewedColValueLocationMapOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated string key = 1; - /** - * repeated string key = 1; - */ - java.util.List - getKeyList(); - /** - * repeated string key = 1; - */ - int getKeyCount(); - /** - * repeated string key = 1; - */ - java.lang.String getKey(int index); - /** - * repeated string key = 1; - */ - com.google.protobuf.ByteString - getKeyBytes(int index); - - // required string value = 2; - /** - * required string value = 2; - */ - boolean hasValue(); - /** - * required string value = 2; - */ - java.lang.String getValue(); - /** - * required string value = 2; - */ - com.google.protobuf.ByteString - getValueBytes(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap} - */ - public static final class SkewedColValueLocationMap extends - com.google.protobuf.GeneratedMessage - implements SkewedColValueLocationMapOrBuilder { - // Use SkewedColValueLocationMap.newBuilder() to construct. 
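// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only; not part of the deleted generated
// file. Because proto2 has no native map type, the SkewedColValueLocationMap
// message removed below models one map entry as "repeated string key" (the
// skewed column values forming the key) plus a "required string value" (the
// location). A hedged usage sketch, assuming the usual generated add*/set*
// accessors for SkewedInfo's repeated fields; the column name and paths are
// hypothetical.
private static void skewedInfoSketch() {
  org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo skewed =
      org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo
          .newBuilder()
          .addSkewedColNames("country")
          .addSkewedColValues(
              org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor
                  .SkewedInfo.SkewedColValueList.newBuilder()
                  .addSkewedColValue("US")
                  .build())
          .addSkewedColValueLocationMaps(
              org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor
                  .SkewedInfo.SkewedColValueLocationMap.newBuilder()
                  .addKey("US")
                  .setValue("/warehouse/t/skew/US") // required field; build() fails without it
                  .build())
          .build();
  assert skewed.getSkewedColValueLocationMapsCount() == 1;
}
// ---------------------------------------------------------------------------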
- private SkewedColValueLocationMap(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private SkewedColValueLocationMap(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final SkewedColValueLocationMap defaultInstance; - public static SkewedColValueLocationMap getDefaultInstance() { - return defaultInstance; - } - - public SkewedColValueLocationMap getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private SkewedColValueLocationMap( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - key_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000001; - } - key_.add(input.readBytes()); - break; - } - case 18: { - bitField0_ |= 0x00000001; - value_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - key_ = new com.google.protobuf.UnmodifiableLazyStringList(key_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SkewedColValueLocationMap parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SkewedColValueLocationMap(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int 
bitField0_; - // repeated string key = 1; - public static final int KEY_FIELD_NUMBER = 1; - private com.google.protobuf.LazyStringList key_; - /** - * repeated string key = 1; - */ - public java.util.List - getKeyList() { - return key_; - } - /** - * repeated string key = 1; - */ - public int getKeyCount() { - return key_.size(); - } - /** - * repeated string key = 1; - */ - public java.lang.String getKey(int index) { - return key_.get(index); - } - /** - * repeated string key = 1; - */ - public com.google.protobuf.ByteString - getKeyBytes(int index) { - return key_.getByteString(index); - } - - // required string value = 2; - public static final int VALUE_FIELD_NUMBER = 2; - private java.lang.Object value_; - /** - * required string value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string value = 2; - */ - public java.lang.String getValue() { - java.lang.Object ref = value_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - value_ = s; - } - return s; - } - } - /** - * required string value = 2; - */ - public com.google.protobuf.ByteString - getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - value_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - key_ = com.google.protobuf.LazyStringArrayList.EMPTY; - value_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasValue()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < key_.size(); i++) { - output.writeBytes(1, key_.getByteString(i)); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(2, getValueBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - { - int dataSize = 0; - for (int i = 0; i < key_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(key_.getByteString(i)); - } - size += dataSize; - size += 1 * getKeyList().size(); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getValueBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - key_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - value_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - key_ = new com.google.protobuf.UnmodifiableLazyStringList( - key_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.key_ = key_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000001; - } - 
result.value_ = value_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.getDefaultInstance()) return this; - if (!other.key_.isEmpty()) { - if (key_.isEmpty()) { - key_ = other.key_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureKeyIsMutable(); - key_.addAll(other.key_); - } - onChanged(); - } - if (other.hasValue()) { - bitField0_ |= 0x00000002; - value_ = other.value_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasValue()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated string key = 1; - private com.google.protobuf.LazyStringList key_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureKeyIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - key_ = new com.google.protobuf.LazyStringArrayList(key_); - bitField0_ |= 0x00000001; - } - } - /** - * repeated string key = 1; - */ - public java.util.List - getKeyList() { - return java.util.Collections.unmodifiableList(key_); - } - /** - * repeated string key = 1; - */ - public int getKeyCount() { - return key_.size(); - } - /** - * repeated string key = 1; - */ - public java.lang.String getKey(int index) { - return key_.get(index); - } - /** - * repeated string key = 1; - */ - public com.google.protobuf.ByteString - getKeyBytes(int index) { - return key_.getByteString(index); - } - /** - * repeated string key = 1; - */ - public Builder setKey( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureKeyIsMutable(); - key_.set(index, value); - onChanged(); - return this; - } - /** - * repeated string key = 1; - */ - public Builder addKey( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureKeyIsMutable(); - key_.add(value); - onChanged(); - return this; - } - /** - * repeated string key = 1; - */ - public Builder addAllKey( - java.lang.Iterable values) { - ensureKeyIsMutable(); - super.addAll(values, key_); - onChanged(); - return this; - } - /** - * repeated 
string key = 1; - */ - public Builder clearKey() { - key_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; - } - /** - * repeated string key = 1; - */ - public Builder addKeyBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensureKeyIsMutable(); - key_.add(value); - onChanged(); - return this; - } - - // required string value = 2; - private java.lang.Object value_ = ""; - /** - * required string value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string value = 2; - */ - public java.lang.String getValue() { - java.lang.Object ref = value_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - value_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string value = 2; - */ - public com.google.protobuf.ByteString - getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - value_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string value = 2; - */ - public Builder setValue( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; - onChanged(); - return this; - } - /** - * required string value = 2; - */ - public Builder clearValue() { - bitField0_ = (bitField0_ & ~0x00000002); - value_ = getDefaultInstance().getValue(); - onChanged(); - return this; - } - /** - * required string value = 2; - */ - public Builder setValueBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap) - } - - static { - defaultInstance = new SkewedColValueLocationMap(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap) - } - - // repeated string skewed_col_names = 1; - public static final int SKEWED_COL_NAMES_FIELD_NUMBER = 1; - private com.google.protobuf.LazyStringList skewedColNames_; - /** - * repeated string skewed_col_names = 1; - */ - public java.util.List - getSkewedColNamesList() { - return skewedColNames_; - } - /** - * repeated string skewed_col_names = 1; - */ - public int getSkewedColNamesCount() { - return skewedColNames_.size(); - } - /** - * repeated string skewed_col_names = 1; - */ - public java.lang.String getSkewedColNames(int index) { - return skewedColNames_.get(index); - } - /** - * repeated string skewed_col_names = 1; - */ - public com.google.protobuf.ByteString - getSkewedColNamesBytes(int index) { - return skewedColNames_.getByteString(index); - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - public static final int SKEWED_COL_VALUES_FIELD_NUMBER = 2; - private java.util.List skewedColValues_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public 
java.util.List getSkewedColValuesList() { - return skewedColValues_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public java.util.List - getSkewedColValuesOrBuilderList() { - return skewedColValues_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public int getSkewedColValuesCount() { - return skewedColValues_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList getSkewedColValues(int index) { - return skewedColValues_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder getSkewedColValuesOrBuilder( - int index) { - return skewedColValues_.get(index); - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - public static final int SKEWED_COL_VALUE_LOCATION_MAPS_FIELD_NUMBER = 3; - private java.util.List skewedColValueLocationMaps_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public java.util.List getSkewedColValueLocationMapsList() { - return skewedColValueLocationMaps_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public java.util.List - getSkewedColValueLocationMapsOrBuilderList() { - return skewedColValueLocationMaps_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public int getSkewedColValueLocationMapsCount() { - return skewedColValueLocationMaps_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap getSkewedColValueLocationMaps(int index) { - return skewedColValueLocationMaps_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder getSkewedColValueLocationMapsOrBuilder( - int index) { - return skewedColValueLocationMaps_.get(index); - } - - private void initFields() { - skewedColNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; - skewedColValues_ = java.util.Collections.emptyList(); - skewedColValueLocationMaps_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getSkewedColValueLocationMapsCount(); i++) { - if (!getSkewedColValueLocationMaps(i).isInitialized()) { 
- memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < skewedColNames_.size(); i++) { - output.writeBytes(1, skewedColNames_.getByteString(i)); - } - for (int i = 0; i < skewedColValues_.size(); i++) { - output.writeMessage(2, skewedColValues_.get(i)); - } - for (int i = 0; i < skewedColValueLocationMaps_.size(); i++) { - output.writeMessage(3, skewedColValueLocationMaps_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - { - int dataSize = 0; - for (int i = 0; i < skewedColNames_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(skewedColNames_.getByteString(i)); - } - size += dataSize; - size += 1 * getSkewedColNamesList().size(); - } - for (int i = 0; i < skewedColValues_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, skewedColValues_.get(i)); - } - for (int i = 0; i < skewedColValueLocationMaps_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, skewedColValueLocationMaps_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public 
static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getSkewedColValuesFieldBuilder(); - getSkewedColValueLocationMapsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - skewedColNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - if (skewedColValuesBuilder_ == null) { - skewedColValues_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - skewedColValuesBuilder_.clear(); - } - if (skewedColValueLocationMapsBuilder_ == null) { - skewedColValueLocationMaps_ = java.util.Collections.emptyList(); 
- bitField0_ = (bitField0_ & ~0x00000004); - } else { - skewedColValueLocationMapsBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo(this); - int from_bitField0_ = bitField0_; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - skewedColNames_ = new com.google.protobuf.UnmodifiableLazyStringList( - skewedColNames_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.skewedColNames_ = skewedColNames_; - if (skewedColValuesBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - skewedColValues_ = java.util.Collections.unmodifiableList(skewedColValues_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.skewedColValues_ = skewedColValues_; - } else { - result.skewedColValues_ = skewedColValuesBuilder_.build(); - } - if (skewedColValueLocationMapsBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004)) { - skewedColValueLocationMaps_ = java.util.Collections.unmodifiableList(skewedColValueLocationMaps_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.skewedColValueLocationMaps_ = skewedColValueLocationMaps_; - } else { - result.skewedColValueLocationMaps_ = skewedColValueLocationMapsBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance()) return this; - if (!other.skewedColNames_.isEmpty()) { - if (skewedColNames_.isEmpty()) { - skewedColNames_ = other.skewedColNames_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureSkewedColNamesIsMutable(); - skewedColNames_.addAll(other.skewedColNames_); - } - onChanged(); - } - if (skewedColValuesBuilder_ == null) { - if (!other.skewedColValues_.isEmpty()) { - if (skewedColValues_.isEmpty()) { - skewedColValues_ = other.skewedColValues_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - 
ensureSkewedColValuesIsMutable(); - skewedColValues_.addAll(other.skewedColValues_); - } - onChanged(); - } - } else { - if (!other.skewedColValues_.isEmpty()) { - if (skewedColValuesBuilder_.isEmpty()) { - skewedColValuesBuilder_.dispose(); - skewedColValuesBuilder_ = null; - skewedColValues_ = other.skewedColValues_; - bitField0_ = (bitField0_ & ~0x00000002); - skewedColValuesBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getSkewedColValuesFieldBuilder() : null; - } else { - skewedColValuesBuilder_.addAllMessages(other.skewedColValues_); - } - } - } - if (skewedColValueLocationMapsBuilder_ == null) { - if (!other.skewedColValueLocationMaps_.isEmpty()) { - if (skewedColValueLocationMaps_.isEmpty()) { - skewedColValueLocationMaps_ = other.skewedColValueLocationMaps_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureSkewedColValueLocationMapsIsMutable(); - skewedColValueLocationMaps_.addAll(other.skewedColValueLocationMaps_); - } - onChanged(); - } - } else { - if (!other.skewedColValueLocationMaps_.isEmpty()) { - if (skewedColValueLocationMapsBuilder_.isEmpty()) { - skewedColValueLocationMapsBuilder_.dispose(); - skewedColValueLocationMapsBuilder_ = null; - skewedColValueLocationMaps_ = other.skewedColValueLocationMaps_; - bitField0_ = (bitField0_ & ~0x00000004); - skewedColValueLocationMapsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getSkewedColValueLocationMapsFieldBuilder() : null; - } else { - skewedColValueLocationMapsBuilder_.addAllMessages(other.skewedColValueLocationMaps_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getSkewedColValueLocationMapsCount(); i++) { - if (!getSkewedColValueLocationMaps(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated string skewed_col_names = 1; - private com.google.protobuf.LazyStringList skewedColNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureSkewedColNamesIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - skewedColNames_ = new com.google.protobuf.LazyStringArrayList(skewedColNames_); - bitField0_ |= 0x00000001; - } - } - /** - * repeated string skewed_col_names = 1; - */ - public java.util.List - getSkewedColNamesList() { - return java.util.Collections.unmodifiableList(skewedColNames_); - } - /** - * repeated string skewed_col_names = 1; - */ - public int getSkewedColNamesCount() { - return skewedColNames_.size(); - } - /** - * repeated string skewed_col_names = 1; - */ - public java.lang.String getSkewedColNames(int index) { - return skewedColNames_.get(index); - } - /** - * repeated string skewed_col_names = 1; - */ - public com.google.protobuf.ByteString - getSkewedColNamesBytes(int index) { - return 
skewedColNames_.getByteString(index); - } - /** - * repeated string skewed_col_names = 1; - */ - public Builder setSkewedColNames( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureSkewedColNamesIsMutable(); - skewedColNames_.set(index, value); - onChanged(); - return this; - } - /** - * repeated string skewed_col_names = 1; - */ - public Builder addSkewedColNames( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureSkewedColNamesIsMutable(); - skewedColNames_.add(value); - onChanged(); - return this; - } - /** - * repeated string skewed_col_names = 1; - */ - public Builder addAllSkewedColNames( - java.lang.Iterable values) { - ensureSkewedColNamesIsMutable(); - super.addAll(values, skewedColNames_); - onChanged(); - return this; - } - /** - * repeated string skewed_col_names = 1; - */ - public Builder clearSkewedColNames() { - skewedColNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; - } - /** - * repeated string skewed_col_names = 1; - */ - public Builder addSkewedColNamesBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensureSkewedColNamesIsMutable(); - skewedColNames_.add(value); - onChanged(); - return this; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - private java.util.List skewedColValues_ = - java.util.Collections.emptyList(); - private void ensureSkewedColValuesIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - skewedColValues_ = new java.util.ArrayList(skewedColValues_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder> skewedColValuesBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public java.util.List getSkewedColValuesList() { - if (skewedColValuesBuilder_ == null) { - return java.util.Collections.unmodifiableList(skewedColValues_); - } else { - return skewedColValuesBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public int getSkewedColValuesCount() { - if (skewedColValuesBuilder_ == null) { - return skewedColValues_.size(); - } else { - return skewedColValuesBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList getSkewedColValues(int index) { - if (skewedColValuesBuilder_ == null) { - return skewedColValues_.get(index); - } else { - return skewedColValuesBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public Builder setSkewedColValues( - int index, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList value) { - if (skewedColValuesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureSkewedColValuesIsMutable(); - skewedColValues_.set(index, value); - onChanged(); - } else { - skewedColValuesBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public Builder setSkewedColValues( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder builderForValue) { - if (skewedColValuesBuilder_ == null) { - ensureSkewedColValuesIsMutable(); - skewedColValues_.set(index, builderForValue.build()); - onChanged(); - } else { - skewedColValuesBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public Builder addSkewedColValues(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList value) { - if (skewedColValuesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureSkewedColValuesIsMutable(); - skewedColValues_.add(value); - onChanged(); - } else { - skewedColValuesBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public Builder addSkewedColValues( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList value) { - if (skewedColValuesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureSkewedColValuesIsMutable(); - skewedColValues_.add(index, value); - onChanged(); - } else { - skewedColValuesBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public Builder addSkewedColValues( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder builderForValue) { - if (skewedColValuesBuilder_ == null) { - ensureSkewedColValuesIsMutable(); - skewedColValues_.add(builderForValue.build()); - onChanged(); - } else { - skewedColValuesBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public Builder addSkewedColValues( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder builderForValue) { - if (skewedColValuesBuilder_ == null) { - ensureSkewedColValuesIsMutable(); - skewedColValues_.add(index, builderForValue.build()); - onChanged(); - } else { - skewedColValuesBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public Builder addAllSkewedColValues( - java.lang.Iterable values) { - if (skewedColValuesBuilder_ == null) { - ensureSkewedColValuesIsMutable(); - super.addAll(values, skewedColValues_); - onChanged(); 
- } else { - skewedColValuesBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public Builder clearSkewedColValues() { - if (skewedColValuesBuilder_ == null) { - skewedColValues_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - skewedColValuesBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public Builder removeSkewedColValues(int index) { - if (skewedColValuesBuilder_ == null) { - ensureSkewedColValuesIsMutable(); - skewedColValues_.remove(index); - onChanged(); - } else { - skewedColValuesBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder getSkewedColValuesBuilder( - int index) { - return getSkewedColValuesFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder getSkewedColValuesOrBuilder( - int index) { - if (skewedColValuesBuilder_ == null) { - return skewedColValues_.get(index); } else { - return skewedColValuesBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public java.util.List - getSkewedColValuesOrBuilderList() { - if (skewedColValuesBuilder_ != null) { - return skewedColValuesBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(skewedColValues_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder addSkewedColValuesBuilder() { - return getSkewedColValuesFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder addSkewedColValuesBuilder( - int index) { - return getSkewedColValuesFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; - */ - public java.util.List - getSkewedColValuesBuilderList() { - return getSkewedColValuesFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder> - getSkewedColValuesFieldBuilder() { - if (skewedColValuesBuilder_ == null) { - skewedColValuesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder>( - skewedColValues_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - skewedColValues_ = null; - } - return skewedColValuesBuilder_; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - private java.util.List skewedColValueLocationMaps_ = - java.util.Collections.emptyList(); - private void ensureSkewedColValueLocationMapsIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - skewedColValueLocationMaps_ = new java.util.ArrayList(skewedColValueLocationMaps_); - bitField0_ |= 0x00000004; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder> skewedColValueLocationMapsBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public java.util.List getSkewedColValueLocationMapsList() { - if (skewedColValueLocationMapsBuilder_ == null) { - return java.util.Collections.unmodifiableList(skewedColValueLocationMaps_); - } else { - return skewedColValueLocationMapsBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public int getSkewedColValueLocationMapsCount() { - if (skewedColValueLocationMapsBuilder_ == null) { - return skewedColValueLocationMaps_.size(); - } else { - return skewedColValueLocationMapsBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap getSkewedColValueLocationMaps(int index) { - if (skewedColValueLocationMapsBuilder_ == null) { - return skewedColValueLocationMaps_.get(index); - } else { - return skewedColValueLocationMapsBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public Builder setSkewedColValueLocationMaps( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap value) { - if (skewedColValueLocationMapsBuilder_ == null) { - if (value == 
null) { - throw new NullPointerException(); - } - ensureSkewedColValueLocationMapsIsMutable(); - skewedColValueLocationMaps_.set(index, value); - onChanged(); - } else { - skewedColValueLocationMapsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public Builder setSkewedColValueLocationMaps( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder builderForValue) { - if (skewedColValueLocationMapsBuilder_ == null) { - ensureSkewedColValueLocationMapsIsMutable(); - skewedColValueLocationMaps_.set(index, builderForValue.build()); - onChanged(); - } else { - skewedColValueLocationMapsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public Builder addSkewedColValueLocationMaps(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap value) { - if (skewedColValueLocationMapsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureSkewedColValueLocationMapsIsMutable(); - skewedColValueLocationMaps_.add(value); - onChanged(); - } else { - skewedColValueLocationMapsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public Builder addSkewedColValueLocationMaps( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap value) { - if (skewedColValueLocationMapsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureSkewedColValueLocationMapsIsMutable(); - skewedColValueLocationMaps_.add(index, value); - onChanged(); - } else { - skewedColValueLocationMapsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public Builder addSkewedColValueLocationMaps( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder builderForValue) { - if (skewedColValueLocationMapsBuilder_ == null) { - ensureSkewedColValueLocationMapsIsMutable(); - skewedColValueLocationMaps_.add(builderForValue.build()); - onChanged(); - } else { - skewedColValueLocationMapsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public Builder addSkewedColValueLocationMaps( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder builderForValue) { - if (skewedColValueLocationMapsBuilder_ == null) { - ensureSkewedColValueLocationMapsIsMutable(); - skewedColValueLocationMaps_.add(index, builderForValue.build()); - onChanged(); - } else { - skewedColValueLocationMapsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public Builder addAllSkewedColValueLocationMaps( - java.lang.Iterable values) { - if (skewedColValueLocationMapsBuilder_ == null) { - ensureSkewedColValueLocationMapsIsMutable(); - super.addAll(values, skewedColValueLocationMaps_); - onChanged(); - } else { - skewedColValueLocationMapsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public Builder clearSkewedColValueLocationMaps() { - if (skewedColValueLocationMapsBuilder_ == null) { - skewedColValueLocationMaps_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - } else { - skewedColValueLocationMapsBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public Builder removeSkewedColValueLocationMaps(int index) { - if (skewedColValueLocationMapsBuilder_ == null) { - ensureSkewedColValueLocationMapsIsMutable(); - skewedColValueLocationMaps_.remove(index); - onChanged(); - } else { - skewedColValueLocationMapsBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder getSkewedColValueLocationMapsBuilder( - int index) { - return getSkewedColValueLocationMapsFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder getSkewedColValueLocationMapsOrBuilder( - int index) { - if (skewedColValueLocationMapsBuilder_ == null) { - return skewedColValueLocationMaps_.get(index); } else { - return skewedColValueLocationMapsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public java.util.List - getSkewedColValueLocationMapsOrBuilderList() { - if (skewedColValueLocationMapsBuilder_ != null) { - return skewedColValueLocationMapsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(skewedColValueLocationMaps_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder addSkewedColValueLocationMapsBuilder() { - return getSkewedColValueLocationMapsFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder addSkewedColValueLocationMapsBuilder( - int index) { - return getSkewedColValueLocationMapsFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; - */ - public java.util.List - getSkewedColValueLocationMapsBuilderList() { - return getSkewedColValueLocationMapsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder> - getSkewedColValueLocationMapsFieldBuilder() { - if (skewedColValueLocationMapsBuilder_ == null) { - skewedColValueLocationMapsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder>( - skewedColValueLocationMaps_, - ((bitField0_ & 0x00000004) == 0x00000004), - getParentForChildren(), - isClean()); - skewedColValueLocationMaps_ = null; - } - return skewedColValueLocationMapsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo) - } - - static { - defaultInstance = new SkewedInfo(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo) - } - - private int bitField0_; - // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - public static final int COLS_FIELD_NUMBER = 1; - private java.util.List cols_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public java.util.List getColsList() { - return cols_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public java.util.List - getColsOrBuilderList() { - return cols_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public int getColsCount() { - return cols_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getCols(int index) { - return cols_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getColsOrBuilder( - int index) { - return cols_.get(index); - } - - // optional string input_format = 2; - public static final int INPUT_FORMAT_FIELD_NUMBER = 2; - private java.lang.Object inputFormat_; - /** - * optional string input_format = 2; - */ - public boolean hasInputFormat() { - return ((bitField0_ & 0x00000001) == 0x00000001); 
- } - /** - * optional string input_format = 2; - */ - public java.lang.String getInputFormat() { - java.lang.Object ref = inputFormat_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - inputFormat_ = s; - } - return s; - } - } - /** - * optional string input_format = 2; - */ - public com.google.protobuf.ByteString - getInputFormatBytes() { - java.lang.Object ref = inputFormat_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - inputFormat_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string output_format = 3; - public static final int OUTPUT_FORMAT_FIELD_NUMBER = 3; - private java.lang.Object outputFormat_; - /** - * optional string output_format = 3; - */ - public boolean hasOutputFormat() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string output_format = 3; - */ - public java.lang.String getOutputFormat() { - java.lang.Object ref = outputFormat_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - outputFormat_ = s; - } - return s; - } - } - /** - * optional string output_format = 3; - */ - public com.google.protobuf.ByteString - getOutputFormatBytes() { - java.lang.Object ref = outputFormat_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - outputFormat_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional bool is_compressed = 4; - public static final int IS_COMPRESSED_FIELD_NUMBER = 4; - private boolean isCompressed_; - /** - * optional bool is_compressed = 4; - */ - public boolean hasIsCompressed() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional bool is_compressed = 4; - */ - public boolean getIsCompressed() { - return isCompressed_; - } - - // optional sint32 num_buckets = 5; - public static final int NUM_BUCKETS_FIELD_NUMBER = 5; - private int numBuckets_; - /** - * optional sint32 num_buckets = 5; - */ - public boolean hasNumBuckets() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional sint32 num_buckets = 5; - */ - public int getNumBuckets() { - return numBuckets_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - public static final int SERDE_INFO_FIELD_NUMBER = 6; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo serdeInfo_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - public boolean hasSerdeInfo() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo getSerdeInfo() { - return serdeInfo_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder getSerdeInfoOrBuilder() { - return serdeInfo_; - } - - // repeated string bucket_cols = 7; - public static final int BUCKET_COLS_FIELD_NUMBER = 7; - private com.google.protobuf.LazyStringList bucketCols_; - /** - * repeated string bucket_cols = 7; - */ - public java.util.List - getBucketColsList() { - return bucketCols_; - } - /** - * repeated string bucket_cols = 7; - */ - public int getBucketColsCount() { - return bucketCols_.size(); - } - /** - * repeated string bucket_cols = 7; - */ - public java.lang.String getBucketCols(int index) { - return bucketCols_.get(index); - } - /** - * repeated string bucket_cols = 7; - */ - public com.google.protobuf.ByteString - getBucketColsBytes(int index) { - return bucketCols_.getByteString(index); - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - public static final int SORT_COLS_FIELD_NUMBER = 8; - private java.util.List sortCols_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public java.util.List getSortColsList() { - return sortCols_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public java.util.List - getSortColsOrBuilderList() { - return sortCols_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public int getSortColsCount() { - return sortCols_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order getSortCols(int index) { - return sortCols_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder getSortColsOrBuilder( - int index) { - return sortCols_.get(index); - } - - // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - public static final int SKEWED_INFO_FIELD_NUMBER = 9; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo skewedInfo_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - public boolean hasSkewedInfo() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo getSkewedInfo() { - return skewedInfo_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder getSkewedInfoOrBuilder() { - return skewedInfo_; - } - - // optional bool stored_as_sub_directories = 10; - public static final int STORED_AS_SUB_DIRECTORIES_FIELD_NUMBER = 10; - private boolean storedAsSubDirectories_; - /** - * optional bool stored_as_sub_directories = 10; - */ - public boolean hasStoredAsSubDirectories() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional bool stored_as_sub_directories = 10; - */ - public boolean getStoredAsSubDirectories() { - return storedAsSubDirectories_; - } - - 
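(Reviewer reference: the accessors above and below are generated from a proto2 schema. The fragment below is only a sketch reconstructed from the field comments visible in this deleted code — the message and nested-type names are inferred from the generated class names, and nested messages whose fields do not appear in this hunk (FieldSchema, SerDeInfo, Order, SkewedColValueList) are elided; it is not the original .proto source being removed.)

    // Sketch of the schema implied by the deleted generated code; field
    // numbers and labels are copied from the javadoc comments in this hunk.
    message StorageDescriptor {
      message Order { /* fields not shown in this hunk */ }
      message SerDeInfo { /* fields not shown in this hunk */ }
      message SkewedInfo {
        message SkewedColValueList { /* fields not shown in this hunk */ }
        message SkewedColValueLocationMap {
          repeated string key = 1;
          required string value = 2;
        }
        repeated string skewed_col_names = 1;
        repeated SkewedColValueList skewed_col_values = 2;
        repeated SkewedColValueLocationMap skewed_col_value_location_maps = 3;
      }
      repeated FieldSchema cols = 1;            // FieldSchema is a top-level message defined elsewhere
      optional string input_format = 2;
      optional string output_format = 3;
      optional bool is_compressed = 4;
      optional sint32 num_buckets = 5;
      optional SerDeInfo serde_info = 6;
      repeated string bucket_cols = 7;
      repeated Order sort_cols = 8;
      optional SkewedInfo skewed_info = 9;
      optional bool stored_as_sub_directories = 10;
    }

(End of reference sketch; the deleted generated code continues below.)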
private void initFields() { - cols_ = java.util.Collections.emptyList(); - inputFormat_ = ""; - outputFormat_ = ""; - isCompressed_ = false; - numBuckets_ = 0; - serdeInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); - bucketCols_ = com.google.protobuf.LazyStringArrayList.EMPTY; - sortCols_ = java.util.Collections.emptyList(); - skewedInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); - storedAsSubDirectories_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getColsCount(); i++) { - if (!getCols(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasSerdeInfo()) { - if (!getSerdeInfo().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getSortColsCount(); i++) { - if (!getSortCols(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasSkewedInfo()) { - if (!getSkewedInfo().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < cols_.size(); i++) { - output.writeMessage(1, cols_.get(i)); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(2, getInputFormatBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(3, getOutputFormatBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(4, isCompressed_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeSInt32(5, numBuckets_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeMessage(6, serdeInfo_); - } - for (int i = 0; i < bucketCols_.size(); i++) { - output.writeBytes(7, bucketCols_.getByteString(i)); - } - for (int i = 0; i < sortCols_.size(); i++) { - output.writeMessage(8, sortCols_.get(i)); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeMessage(9, skewedInfo_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeBool(10, storedAsSubDirectories_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < cols_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, cols_.get(i)); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getInputFormatBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getOutputFormatBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(4, isCompressed_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeSInt32Size(5, numBuckets_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(6, serdeInfo_); - } - { - int dataSize = 0; - for (int i = 0; i < bucketCols_.size(); i++) { - dataSize 
+= com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(bucketCols_.getByteString(i)); - } - size += dataSize; - size += 1 * getBucketColsList().size(); - } - for (int i = 0; i < sortCols_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(8, sortCols_.get(i)); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(9, skewedInfo_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(10, storedAsSubDirectories_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return 
Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptorOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getColsFieldBuilder(); - getSerdeInfoFieldBuilder(); - getSortColsFieldBuilder(); - getSkewedInfoFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (colsBuilder_ == null) { - cols_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - colsBuilder_.clear(); - } - inputFormat_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - outputFormat_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - isCompressed_ = false; - bitField0_ = (bitField0_ & ~0x00000008); - numBuckets_ = 0; - bitField0_ = (bitField0_ & ~0x00000010); - if (serdeInfoBuilder_ == null) { - serdeInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); - } else { - serdeInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000020); - bucketCols_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000040); - if (sortColsBuilder_ == null) { - sortCols_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000080); - } else { - sortColsBuilder_.clear(); - } - if (skewedInfoBuilder_ == null) { - skewedInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); - } else { - skewedInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000100); - storedAsSubDirectories_ = false; - bitField0_ = (bitField0_ & ~0x00000200); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - 
public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (colsBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - cols_ = java.util.Collections.unmodifiableList(cols_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.cols_ = cols_; - } else { - result.cols_ = colsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000001; - } - result.inputFormat_ = inputFormat_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000002; - } - result.outputFormat_ = outputFormat_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000004; - } - result.isCompressed_ = isCompressed_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000008; - } - result.numBuckets_ = numBuckets_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000010; - } - if (serdeInfoBuilder_ == null) { - result.serdeInfo_ = serdeInfo_; - } else { - result.serdeInfo_ = serdeInfoBuilder_.build(); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - bucketCols_ = new com.google.protobuf.UnmodifiableLazyStringList( - bucketCols_); - bitField0_ = (bitField0_ & ~0x00000040); - } - result.bucketCols_ = bucketCols_; - if (sortColsBuilder_ == null) { - if (((bitField0_ & 0x00000080) == 0x00000080)) { - sortCols_ = java.util.Collections.unmodifiableList(sortCols_); - bitField0_ = (bitField0_ & ~0x00000080); - } - result.sortCols_ = sortCols_; - } else { - result.sortCols_ = sortColsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000100) == 0x00000100)) { - to_bitField0_ |= 0x00000020; - } - if (skewedInfoBuilder_ == null) { - result.skewedInfo_ = skewedInfo_; - } else { - result.skewedInfo_ = skewedInfoBuilder_.build(); - } - if (((from_bitField0_ & 0x00000200) == 0x00000200)) { - to_bitField0_ |= 0x00000040; - } - result.storedAsSubDirectories_ = storedAsSubDirectories_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor other) { - if (other == 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.getDefaultInstance()) return this; - if (colsBuilder_ == null) { - if (!other.cols_.isEmpty()) { - if (cols_.isEmpty()) { - cols_ = other.cols_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureColsIsMutable(); - cols_.addAll(other.cols_); - } - onChanged(); - } - } else { - if (!other.cols_.isEmpty()) { - if (colsBuilder_.isEmpty()) { - colsBuilder_.dispose(); - colsBuilder_ = null; - cols_ = other.cols_; - bitField0_ = (bitField0_ & ~0x00000001); - colsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getColsFieldBuilder() : null; - } else { - colsBuilder_.addAllMessages(other.cols_); - } - } - } - if (other.hasInputFormat()) { - bitField0_ |= 0x00000002; - inputFormat_ = other.inputFormat_; - onChanged(); - } - if (other.hasOutputFormat()) { - bitField0_ |= 0x00000004; - outputFormat_ = other.outputFormat_; - onChanged(); - } - if (other.hasIsCompressed()) { - setIsCompressed(other.getIsCompressed()); - } - if (other.hasNumBuckets()) { - setNumBuckets(other.getNumBuckets()); - } - if (other.hasSerdeInfo()) { - mergeSerdeInfo(other.getSerdeInfo()); - } - if (!other.bucketCols_.isEmpty()) { - if (bucketCols_.isEmpty()) { - bucketCols_ = other.bucketCols_; - bitField0_ = (bitField0_ & ~0x00000040); - } else { - ensureBucketColsIsMutable(); - bucketCols_.addAll(other.bucketCols_); - } - onChanged(); - } - if (sortColsBuilder_ == null) { - if (!other.sortCols_.isEmpty()) { - if (sortCols_.isEmpty()) { - sortCols_ = other.sortCols_; - bitField0_ = (bitField0_ & ~0x00000080); - } else { - ensureSortColsIsMutable(); - sortCols_.addAll(other.sortCols_); - } - onChanged(); - } - } else { - if (!other.sortCols_.isEmpty()) { - if (sortColsBuilder_.isEmpty()) { - sortColsBuilder_.dispose(); - sortColsBuilder_ = null; - sortCols_ = other.sortCols_; - bitField0_ = (bitField0_ & ~0x00000080); - sortColsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getSortColsFieldBuilder() : null; - } else { - sortColsBuilder_.addAllMessages(other.sortCols_); - } - } - } - if (other.hasSkewedInfo()) { - mergeSkewedInfo(other.getSkewedInfo()); - } - if (other.hasStoredAsSubDirectories()) { - setStoredAsSubDirectories(other.getStoredAsSubDirectories()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getColsCount(); i++) { - if (!getCols(i).isInitialized()) { - - return false; - } - } - if (hasSerdeInfo()) { - if (!getSerdeInfo().isInitialized()) { - - return false; - } - } - for (int i = 0; i < getSortColsCount(); i++) { - if (!getSortCols(i).isInitialized()) { - - return false; - } - } - if (hasSkewedInfo()) { - if (!getSkewedInfo().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - private java.util.List cols_ = - java.util.Collections.emptyList(); - private void ensureColsIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - cols_ = new java.util.ArrayList(cols_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder> colsBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public java.util.List getColsList() { - if (colsBuilder_ == null) { - return java.util.Collections.unmodifiableList(cols_); - } else { - return colsBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public int getColsCount() { - if (colsBuilder_ == null) { - return cols_.size(); - } else { - return colsBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getCols(int index) { - if (colsBuilder_ == null) { - return cols_.get(index); - } else { - return colsBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public Builder setCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.set(index, value); - onChanged(); - } else { - colsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public Builder setCols( - int index, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.set(index, builderForValue.build()); - onChanged(); - } else { - colsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public Builder addCols(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.add(value); - onChanged(); - } else { - colsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public Builder addCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.add(index, value); - onChanged(); - } else { - colsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public Builder addCols( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.add(builderForValue.build()); - onChanged(); - } else { - colsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public Builder addCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.add(index, builderForValue.build()); - onChanged(); - } else { - colsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public Builder addAllCols( - java.lang.Iterable values) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - super.addAll(values, cols_); - onChanged(); - } else { - colsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public Builder clearCols() { - if (colsBuilder_ == null) { - cols_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - colsBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public Builder removeCols(int index) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.remove(index); - onChanged(); - } else { - colsBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder getColsBuilder( - int index) { - return getColsFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getColsOrBuilder( - int index) { - if (colsBuilder_ == null) { - return cols_.get(index); } else { - return colsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public java.util.List - getColsOrBuilderList() { - if (colsBuilder_ != null) { - return colsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(cols_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder addColsBuilder() { - return getColsFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder addColsBuilder( - int index) { - return getColsFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; - */ - public java.util.List - getColsBuilderList() { - return getColsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder> - getColsFieldBuilder() { - if (colsBuilder_ == null) { - colsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder>( - cols_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - cols_ = null; - } - return colsBuilder_; - } - - // optional string input_format = 2; - private java.lang.Object inputFormat_ = ""; - /** - * optional string input_format = 2; - */ - public boolean hasInputFormat() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string input_format = 2; - */ - public java.lang.String getInputFormat() { - java.lang.Object ref = inputFormat_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - inputFormat_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string input_format = 2; - */ - public com.google.protobuf.ByteString - getInputFormatBytes() { - java.lang.Object ref = inputFormat_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - inputFormat_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string input_format = 2; - */ - public Builder setInputFormat( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - inputFormat_ = value; - onChanged(); - return this; - } - /** - * optional string input_format = 2; - */ - public Builder clearInputFormat() { - bitField0_ = (bitField0_ & ~0x00000002); - inputFormat_ = getDefaultInstance().getInputFormat(); - onChanged(); - return this; - } - /** - * optional string input_format = 2; - */ - public Builder setInputFormatBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new 
NullPointerException(); - } - bitField0_ |= 0x00000002; - inputFormat_ = value; - onChanged(); - return this; - } - - // optional string output_format = 3; - private java.lang.Object outputFormat_ = ""; - /** - * optional string output_format = 3; - */ - public boolean hasOutputFormat() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string output_format = 3; - */ - public java.lang.String getOutputFormat() { - java.lang.Object ref = outputFormat_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - outputFormat_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string output_format = 3; - */ - public com.google.protobuf.ByteString - getOutputFormatBytes() { - java.lang.Object ref = outputFormat_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - outputFormat_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string output_format = 3; - */ - public Builder setOutputFormat( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - outputFormat_ = value; - onChanged(); - return this; - } - /** - * optional string output_format = 3; - */ - public Builder clearOutputFormat() { - bitField0_ = (bitField0_ & ~0x00000004); - outputFormat_ = getDefaultInstance().getOutputFormat(); - onChanged(); - return this; - } - /** - * optional string output_format = 3; - */ - public Builder setOutputFormatBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - outputFormat_ = value; - onChanged(); - return this; - } - - // optional bool is_compressed = 4; - private boolean isCompressed_ ; - /** - * optional bool is_compressed = 4; - */ - public boolean hasIsCompressed() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bool is_compressed = 4; - */ - public boolean getIsCompressed() { - return isCompressed_; - } - /** - * optional bool is_compressed = 4; - */ - public Builder setIsCompressed(boolean value) { - bitField0_ |= 0x00000008; - isCompressed_ = value; - onChanged(); - return this; - } - /** - * optional bool is_compressed = 4; - */ - public Builder clearIsCompressed() { - bitField0_ = (bitField0_ & ~0x00000008); - isCompressed_ = false; - onChanged(); - return this; - } - - // optional sint32 num_buckets = 5; - private int numBuckets_ ; - /** - * optional sint32 num_buckets = 5; - */ - public boolean hasNumBuckets() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional sint32 num_buckets = 5; - */ - public int getNumBuckets() { - return numBuckets_; - } - /** - * optional sint32 num_buckets = 5; - */ - public Builder setNumBuckets(int value) { - bitField0_ |= 0x00000010; - numBuckets_ = value; - onChanged(); - return this; - } - /** - * optional sint32 num_buckets = 5; - */ - public Builder clearNumBuckets() { - bitField0_ = (bitField0_ & ~0x00000010); - numBuckets_ = 0; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo serdeInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); - 
private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder> serdeInfoBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - public boolean hasSerdeInfo() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo getSerdeInfo() { - if (serdeInfoBuilder_ == null) { - return serdeInfo_; - } else { - return serdeInfoBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - public Builder setSerdeInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo value) { - if (serdeInfoBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - serdeInfo_ = value; - onChanged(); - } else { - serdeInfoBuilder_.setMessage(value); - } - bitField0_ |= 0x00000020; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - public Builder setSerdeInfo( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder builderForValue) { - if (serdeInfoBuilder_ == null) { - serdeInfo_ = builderForValue.build(); - onChanged(); - } else { - serdeInfoBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000020; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - public Builder mergeSerdeInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo value) { - if (serdeInfoBuilder_ == null) { - if (((bitField0_ & 0x00000020) == 0x00000020) && - serdeInfo_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance()) { - serdeInfo_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.newBuilder(serdeInfo_).mergeFrom(value).buildPartial(); - } else { - serdeInfo_ = value; - } - onChanged(); - } else { - serdeInfoBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000020; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - public Builder clearSerdeInfo() { - if (serdeInfoBuilder_ == null) { - serdeInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); - onChanged(); - } else { - serdeInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000020); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder getSerdeInfoBuilder() { - bitField0_ |= 0x00000020; - onChanged(); - return getSerdeInfoFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder 
getSerdeInfoOrBuilder() { - if (serdeInfoBuilder_ != null) { - return serdeInfoBuilder_.getMessageOrBuilder(); - } else { - return serdeInfo_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder> - getSerdeInfoFieldBuilder() { - if (serdeInfoBuilder_ == null) { - serdeInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder>( - serdeInfo_, - getParentForChildren(), - isClean()); - serdeInfo_ = null; - } - return serdeInfoBuilder_; - } - - // repeated string bucket_cols = 7; - private com.google.protobuf.LazyStringList bucketCols_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureBucketColsIsMutable() { - if (!((bitField0_ & 0x00000040) == 0x00000040)) { - bucketCols_ = new com.google.protobuf.LazyStringArrayList(bucketCols_); - bitField0_ |= 0x00000040; - } - } - /** - * repeated string bucket_cols = 7; - */ - public java.util.List - getBucketColsList() { - return java.util.Collections.unmodifiableList(bucketCols_); - } - /** - * repeated string bucket_cols = 7; - */ - public int getBucketColsCount() { - return bucketCols_.size(); - } - /** - * repeated string bucket_cols = 7; - */ - public java.lang.String getBucketCols(int index) { - return bucketCols_.get(index); - } - /** - * repeated string bucket_cols = 7; - */ - public com.google.protobuf.ByteString - getBucketColsBytes(int index) { - return bucketCols_.getByteString(index); - } - /** - * repeated string bucket_cols = 7; - */ - public Builder setBucketCols( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureBucketColsIsMutable(); - bucketCols_.set(index, value); - onChanged(); - return this; - } - /** - * repeated string bucket_cols = 7; - */ - public Builder addBucketCols( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureBucketColsIsMutable(); - bucketCols_.add(value); - onChanged(); - return this; - } - /** - * repeated string bucket_cols = 7; - */ - public Builder addAllBucketCols( - java.lang.Iterable values) { - ensureBucketColsIsMutable(); - super.addAll(values, bucketCols_); - onChanged(); - return this; - } - /** - * repeated string bucket_cols = 7; - */ - public Builder clearBucketCols() { - bucketCols_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000040); - onChanged(); - return this; - } - /** - * repeated string bucket_cols = 7; - */ - public Builder addBucketColsBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensureBucketColsIsMutable(); - bucketCols_.add(value); - onChanged(); - return this; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - private java.util.List sortCols_ = - java.util.Collections.emptyList(); - private void ensureSortColsIsMutable() { - if (!((bitField0_ 
& 0x00000080) == 0x00000080)) { - sortCols_ = new java.util.ArrayList(sortCols_); - bitField0_ |= 0x00000080; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder> sortColsBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public java.util.List getSortColsList() { - if (sortColsBuilder_ == null) { - return java.util.Collections.unmodifiableList(sortCols_); - } else { - return sortColsBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public int getSortColsCount() { - if (sortColsBuilder_ == null) { - return sortCols_.size(); - } else { - return sortColsBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order getSortCols(int index) { - if (sortColsBuilder_ == null) { - return sortCols_.get(index); - } else { - return sortColsBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public Builder setSortCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order value) { - if (sortColsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureSortColsIsMutable(); - sortCols_.set(index, value); - onChanged(); - } else { - sortColsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public Builder setSortCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder builderForValue) { - if (sortColsBuilder_ == null) { - ensureSortColsIsMutable(); - sortCols_.set(index, builderForValue.build()); - onChanged(); - } else { - sortColsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public Builder addSortCols(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order value) { - if (sortColsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureSortColsIsMutable(); - sortCols_.add(value); - onChanged(); - } else { - sortColsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public Builder addSortCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order value) { - if (sortColsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureSortColsIsMutable(); - sortCols_.add(index, value); - onChanged(); - } else { - sortColsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public Builder addSortCols( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder builderForValue) { - if 
(sortColsBuilder_ == null) { - ensureSortColsIsMutable(); - sortCols_.add(builderForValue.build()); - onChanged(); - } else { - sortColsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public Builder addSortCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder builderForValue) { - if (sortColsBuilder_ == null) { - ensureSortColsIsMutable(); - sortCols_.add(index, builderForValue.build()); - onChanged(); - } else { - sortColsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public Builder addAllSortCols( - java.lang.Iterable values) { - if (sortColsBuilder_ == null) { - ensureSortColsIsMutable(); - super.addAll(values, sortCols_); - onChanged(); - } else { - sortColsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public Builder clearSortCols() { - if (sortColsBuilder_ == null) { - sortCols_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000080); - onChanged(); - } else { - sortColsBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public Builder removeSortCols(int index) { - if (sortColsBuilder_ == null) { - ensureSortColsIsMutable(); - sortCols_.remove(index); - onChanged(); - } else { - sortColsBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder getSortColsBuilder( - int index) { - return getSortColsFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder getSortColsOrBuilder( - int index) { - if (sortColsBuilder_ == null) { - return sortCols_.get(index); } else { - return sortColsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public java.util.List - getSortColsOrBuilderList() { - if (sortColsBuilder_ != null) { - return sortColsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(sortCols_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder addSortColsBuilder() { - return getSortColsFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder addSortColsBuilder( - int index) { - return getSortColsFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.getDefaultInstance()); - } - /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; - */ - public java.util.List - getSortColsBuilderList() { - return getSortColsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder> - getSortColsFieldBuilder() { - if (sortColsBuilder_ == null) { - sortColsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder>( - sortCols_, - ((bitField0_ & 0x00000080) == 0x00000080), - getParentForChildren(), - isClean()); - sortCols_ = null; - } - return sortColsBuilder_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo skewedInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder> skewedInfoBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - public boolean hasSkewedInfo() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo getSkewedInfo() { - if (skewedInfoBuilder_ == null) { - return skewedInfo_; - } else { - return skewedInfoBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - public Builder setSkewedInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo value) { - if (skewedInfoBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - skewedInfo_ = value; - onChanged(); - } else { - skewedInfoBuilder_.setMessage(value); - } - bitField0_ |= 0x00000100; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - public Builder setSkewedInfo( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder builderForValue) { - if (skewedInfoBuilder_ == null) { - skewedInfo_ = builderForValue.build(); - onChanged(); - } else { - skewedInfoBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000100; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - public Builder mergeSkewedInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo value) { - if (skewedInfoBuilder_ == null) { - if (((bitField0_ & 0x00000100) 
== 0x00000100) && - skewedInfo_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance()) { - skewedInfo_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.newBuilder(skewedInfo_).mergeFrom(value).buildPartial(); - } else { - skewedInfo_ = value; - } - onChanged(); - } else { - skewedInfoBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000100; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - public Builder clearSkewedInfo() { - if (skewedInfoBuilder_ == null) { - skewedInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); - onChanged(); - } else { - skewedInfoBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000100); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder getSkewedInfoBuilder() { - bitField0_ |= 0x00000100; - onChanged(); - return getSkewedInfoFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder getSkewedInfoOrBuilder() { - if (skewedInfoBuilder_ != null) { - return skewedInfoBuilder_.getMessageOrBuilder(); - } else { - return skewedInfo_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder> - getSkewedInfoFieldBuilder() { - if (skewedInfoBuilder_ == null) { - skewedInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder>( - skewedInfo_, - getParentForChildren(), - isClean()); - skewedInfo_ = null; - } - return skewedInfoBuilder_; - } - - // optional bool stored_as_sub_directories = 10; - private boolean storedAsSubDirectories_ ; - /** - * optional bool stored_as_sub_directories = 10; - */ - public boolean hasStoredAsSubDirectories() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - /** - * optional bool stored_as_sub_directories = 10; - */ - public boolean getStoredAsSubDirectories() { - return storedAsSubDirectories_; - } - /** - * optional bool stored_as_sub_directories = 10; - */ - public Builder setStoredAsSubDirectories(boolean value) { - bitField0_ |= 0x00000200; - storedAsSubDirectories_ = value; - onChanged(); - return this; - } - /** - * optional bool stored_as_sub_directories = 10; - */ - public Builder clearStoredAsSubDirectories() { - bitField0_ = (bitField0_ & ~0x00000200); - storedAsSubDirectories_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor) - } - - static { 
- defaultInstance = new StorageDescriptor(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor) - } - - public interface TableOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional string owner = 1; - /** - * optional string owner = 1; - */ - boolean hasOwner(); - /** - * optional string owner = 1; - */ - java.lang.String getOwner(); - /** - * optional string owner = 1; - */ - com.google.protobuf.ByteString - getOwnerBytes(); - - // optional int64 create_time = 2; - /** - * optional int64 create_time = 2; - */ - boolean hasCreateTime(); - /** - * optional int64 create_time = 2; - */ - long getCreateTime(); - - // optional int64 last_access_time = 3; - /** - * optional int64 last_access_time = 3; - */ - boolean hasLastAccessTime(); - /** - * optional int64 last_access_time = 3; - */ - long getLastAccessTime(); - - // optional int64 retention = 4; - /** - * optional int64 retention = 4; - */ - boolean hasRetention(); - /** - * optional int64 retention = 4; - */ - long getRetention(); - - // optional string location = 5; - /** - * optional string location = 5; - */ - boolean hasLocation(); - /** - * optional string location = 5; - */ - java.lang.String getLocation(); - /** - * optional string location = 5; - */ - com.google.protobuf.ByteString - getLocationBytes(); - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-     * storage descriptor parameters
-     * 
- */ - boolean hasSdParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-     * storage descriptor parameters
-     * 
- */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-     * storage descriptor parameters
-     * 
- */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder(); - - // required bytes sd_hash = 7; - /** - * required bytes sd_hash = 7; - */ - boolean hasSdHash(); - /** - * required bytes sd_hash = 7; - */ - com.google.protobuf.ByteString getSdHash(); - - // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - java.util.List - getPartitionKeysList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getPartitionKeys(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - int getPartitionKeysCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - java.util.List - getPartitionKeysOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getPartitionKeysOrBuilder( - int index); - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - boolean hasParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); - - // optional string view_original_text = 10; - /** - * optional string view_original_text = 10; - */ - boolean hasViewOriginalText(); - /** - * optional string view_original_text = 10; - */ - java.lang.String getViewOriginalText(); - /** - * optional string view_original_text = 10; - */ - com.google.protobuf.ByteString - getViewOriginalTextBytes(); - - // optional string view_expanded_text = 11; - /** - * optional string view_expanded_text = 11; - */ - boolean hasViewExpandedText(); - /** - * optional string view_expanded_text = 11; - */ - java.lang.String getViewExpandedText(); - /** - * optional string view_expanded_text = 11; - */ - com.google.protobuf.ByteString - getViewExpandedTextBytes(); - - // optional string table_type = 12; - /** - * optional string table_type = 12; - */ - boolean hasTableType(); - /** - * optional string table_type = 12; - */ - java.lang.String getTableType(); - /** - * optional string table_type = 12; - */ - com.google.protobuf.ByteString - getTableTypeBytes(); - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - boolean hasPrivileges(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder(); - - // optional bool is_temporary = 14; - /** - * optional bool is_temporary = 14; - */ - 
boolean hasIsTemporary(); - /** - * optional bool is_temporary = 14; - */ - boolean getIsTemporary(); - - // optional bool is_rewrite_enabled = 15; - /** - * optional bool is_rewrite_enabled = 15; - */ - boolean hasIsRewriteEnabled(); - /** - * optional bool is_rewrite_enabled = 15; - */ - boolean getIsRewriteEnabled(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Table} - */ - public static final class Table extends - com.google.protobuf.GeneratedMessage - implements TableOrBuilder { - // Use Table.newBuilder() to construct. - private Table(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Table(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Table defaultInstance; - public static Table getDefaultInstance() { - return defaultInstance; - } - - public Table getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Table( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - owner_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - createTime_ = input.readInt64(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - lastAccessTime_ = input.readInt64(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - retention_ = input.readInt64(); - break; - } - case 42: { - bitField0_ |= 0x00000010; - location_ = input.readBytes(); - break; - } - case 50: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; - if (((bitField0_ & 0x00000020) == 0x00000020)) { - subBuilder = sdParameters_.toBuilder(); - } - sdParameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(sdParameters_); - sdParameters_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000020; - break; - } - case 58: { - bitField0_ |= 0x00000040; - sdHash_ = input.readBytes(); - break; - } - case 66: { - if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { - partitionKeys_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000080; - } - partitionKeys_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.PARSER, extensionRegistry)); - break; - } - case 74: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; - if (((bitField0_ & 0x00000080) == 0x00000080)) { - subBuilder = parameters_.toBuilder(); - } - parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); - if (subBuilder != null) { - 
subBuilder.mergeFrom(parameters_); - parameters_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000080; - break; - } - case 82: { - bitField0_ |= 0x00000100; - viewOriginalText_ = input.readBytes(); - break; - } - case 90: { - bitField0_ |= 0x00000200; - viewExpandedText_ = input.readBytes(); - break; - } - case 98: { - bitField0_ |= 0x00000400; - tableType_ = input.readBytes(); - break; - } - case 106: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder subBuilder = null; - if (((bitField0_ & 0x00000800) == 0x00000800)) { - subBuilder = privileges_.toBuilder(); - } - privileges_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(privileges_); - privileges_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000800; - break; - } - case 112: { - bitField0_ |= 0x00001000; - isTemporary_ = input.readBool(); - break; - } - case 120: { - bitField0_ |= 0x00002000; - isRewriteEnabled_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { - partitionKeys_ = java.util.Collections.unmodifiableList(partitionKeys_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.Builder.class); - } - - public static com.google.protobuf.Parser
<Table> PARSER = - new com.google.protobuf.AbstractParser<Table>
() { - public Table parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Table(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser
getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional string owner = 1; - public static final int OWNER_FIELD_NUMBER = 1; - private java.lang.Object owner_; - /** - * optional string owner = 1; - */ - public boolean hasOwner() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional string owner = 1; - */ - public java.lang.String getOwner() { - java.lang.Object ref = owner_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - owner_ = s; - } - return s; - } - } - /** - * optional string owner = 1; - */ - public com.google.protobuf.ByteString - getOwnerBytes() { - java.lang.Object ref = owner_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - owner_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional int64 create_time = 2; - public static final int CREATE_TIME_FIELD_NUMBER = 2; - private long createTime_; - /** - * optional int64 create_time = 2; - */ - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional int64 create_time = 2; - */ - public long getCreateTime() { - return createTime_; - } - - // optional int64 last_access_time = 3; - public static final int LAST_ACCESS_TIME_FIELD_NUMBER = 3; - private long lastAccessTime_; - /** - * optional int64 last_access_time = 3; - */ - public boolean hasLastAccessTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional int64 last_access_time = 3; - */ - public long getLastAccessTime() { - return lastAccessTime_; - } - - // optional int64 retention = 4; - public static final int RETENTION_FIELD_NUMBER = 4; - private long retention_; - /** - * optional int64 retention = 4; - */ - public boolean hasRetention() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional int64 retention = 4; - */ - public long getRetention() { - return retention_; - } - - // optional string location = 5; - public static final int LOCATION_FIELD_NUMBER = 5; - private java.lang.Object location_; - /** - * optional string location = 5; - */ - public boolean hasLocation() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional string location = 5; - */ - public java.lang.String getLocation() { - java.lang.Object ref = location_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - location_ = s; - } - return s; - } - } - /** - * optional string location = 5; - */ - public com.google.protobuf.ByteString - getLocationBytes() { - java.lang.Object ref = location_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - location_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - public static final int SD_PARAMETERS_FIELD_NUMBER = 6; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_; - /** - * optional 
.org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-     * storage descriptor parameters
-     * 
- */ - public boolean hasSdParameters() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-     * storage descriptor parameters
-     * 
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { - return sdParameters_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-     * storage descriptor parameters - *
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { - return sdParameters_; - } - - // required bytes sd_hash = 7; - public static final int SD_HASH_FIELD_NUMBER = 7; - private com.google.protobuf.ByteString sdHash_; - /** - * required bytes sd_hash = 7; - */ - public boolean hasSdHash() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * required bytes sd_hash = 7; - */ - public com.google.protobuf.ByteString getSdHash() { - return sdHash_; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - public static final int PARTITION_KEYS_FIELD_NUMBER = 8; - private java.util.List partitionKeys_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public java.util.List getPartitionKeysList() { - return partitionKeys_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public java.util.List - getPartitionKeysOrBuilderList() { - return partitionKeys_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public int getPartitionKeysCount() { - return partitionKeys_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getPartitionKeys(int index) { - return partitionKeys_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getPartitionKeysOrBuilder( - int index) { - return partitionKeys_.get(index); - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - public static final int PARAMETERS_FIELD_NUMBER = 9; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - public boolean hasParameters() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { - return parameters_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { - return parameters_; - } - - // optional string view_original_text = 10; - public static final int VIEW_ORIGINAL_TEXT_FIELD_NUMBER = 10; - private java.lang.Object viewOriginalText_; - /** - * optional string view_original_text = 10; - */ - public boolean hasViewOriginalText() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - * optional string view_original_text = 10; - */ - public java.lang.String getViewOriginalText() { - java.lang.Object ref = viewOriginalText_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - viewOriginalText_ = s; - } - return s; - } - } - /** - * optional string view_original_text = 10; - */ - public com.google.protobuf.ByteString - getViewOriginalTextBytes() { - java.lang.Object ref = viewOriginalText_; - if (ref 
instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - viewOriginalText_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string view_expanded_text = 11; - public static final int VIEW_EXPANDED_TEXT_FIELD_NUMBER = 11; - private java.lang.Object viewExpandedText_; - /** - * optional string view_expanded_text = 11; - */ - public boolean hasViewExpandedText() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - /** - * optional string view_expanded_text = 11; - */ - public java.lang.String getViewExpandedText() { - java.lang.Object ref = viewExpandedText_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - viewExpandedText_ = s; - } - return s; - } - } - /** - * optional string view_expanded_text = 11; - */ - public com.google.protobuf.ByteString - getViewExpandedTextBytes() { - java.lang.Object ref = viewExpandedText_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - viewExpandedText_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string table_type = 12; - public static final int TABLE_TYPE_FIELD_NUMBER = 12; - private java.lang.Object tableType_; - /** - * optional string table_type = 12; - */ - public boolean hasTableType() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - /** - * optional string table_type = 12; - */ - public java.lang.String getTableType() { - java.lang.Object ref = tableType_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - tableType_ = s; - } - return s; - } - } - /** - * optional string table_type = 12; - */ - public com.google.protobuf.ByteString - getTableTypeBytes() { - java.lang.Object ref = tableType_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - tableType_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - public static final int PRIVILEGES_FIELD_NUMBER = 13; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public boolean hasPrivileges() { - return ((bitField0_ & 0x00000800) == 0x00000800); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { - return privileges_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { - return privileges_; - } - - // optional bool is_temporary = 14; - public static final int IS_TEMPORARY_FIELD_NUMBER = 14; - private boolean isTemporary_; 
- /** - * optional bool is_temporary = 14; - */ - public boolean hasIsTemporary() { - return ((bitField0_ & 0x00001000) == 0x00001000); - } - /** - * optional bool is_temporary = 14; - */ - public boolean getIsTemporary() { - return isTemporary_; - } - - // optional bool is_rewrite_enabled = 15; - public static final int IS_REWRITE_ENABLED_FIELD_NUMBER = 15; - private boolean isRewriteEnabled_; - /** - * optional bool is_rewrite_enabled = 15; - */ - public boolean hasIsRewriteEnabled() { - return ((bitField0_ & 0x00002000) == 0x00002000); - } - /** - * optional bool is_rewrite_enabled = 15; - */ - public boolean getIsRewriteEnabled() { - return isRewriteEnabled_; - } - - private void initFields() { - owner_ = ""; - createTime_ = 0L; - lastAccessTime_ = 0L; - retention_ = 0L; - location_ = ""; - sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - sdHash_ = com.google.protobuf.ByteString.EMPTY; - partitionKeys_ = java.util.Collections.emptyList(); - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - viewOriginalText_ = ""; - viewExpandedText_ = ""; - tableType_ = ""; - privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); - isTemporary_ = false; - isRewriteEnabled_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasSdHash()) { - memoizedIsInitialized = 0; - return false; - } - if (hasSdParameters()) { - if (!getSdParameters().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getPartitionKeysCount(); i++) { - if (!getPartitionKeys(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasParameters()) { - if (!getParameters().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasPrivileges()) { - if (!getPrivileges().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getOwnerBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeInt64(2, createTime_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt64(3, lastAccessTime_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeInt64(4, retention_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBytes(5, getLocationBytes()); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeMessage(6, sdParameters_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeBytes(7, sdHash_); - } - for (int i = 0; i < partitionKeys_.size(); i++) { - output.writeMessage(8, partitionKeys_.get(i)); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeMessage(9, parameters_); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeBytes(10, getViewOriginalTextBytes()); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - output.writeBytes(11, getViewExpandedTextBytes()); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - output.writeBytes(12, getTableTypeBytes()); - } - if (((bitField0_ & 0x00000800) == 0x00000800)) { - 
output.writeMessage(13, privileges_); - } - if (((bitField0_ & 0x00001000) == 0x00001000)) { - output.writeBool(14, isTemporary_); - } - if (((bitField0_ & 0x00002000) == 0x00002000)) { - output.writeBool(15, isRewriteEnabled_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getOwnerBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(2, createTime_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(3, lastAccessTime_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(4, retention_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(5, getLocationBytes()); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(6, sdParameters_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(7, sdHash_); - } - for (int i = 0; i < partitionKeys_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(8, partitionKeys_.get(i)); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(9, parameters_); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(10, getViewOriginalTextBytes()); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(11, getViewExpandedTextBytes()); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(12, getTableTypeBytes()); - } - if (((bitField0_ & 0x00000800) == 0x00000800)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(13, privileges_); - } - if (((bitField0_ & 0x00001000) == 0x00001000)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(14, isTemporary_); - } - if (((bitField0_ & 0x00002000) == 0x00002000)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(15, isRewriteEnabled_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table 
parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Table} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.TableOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getSdParametersFieldBuilder(); - getPartitionKeysFieldBuilder(); - getParametersFieldBuilder(); - getPrivilegesFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - owner_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - createTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - lastAccessTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - retention_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - location_ = ""; - bitField0_ = (bitField0_ & ~0x00000010); - if (sdParametersBuilder_ == null) { - sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - } else { - sdParametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000020); - sdHash_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000040); - if (partitionKeysBuilder_ == null) { - partitionKeys_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000080); - } else { - partitionKeysBuilder_.clear(); - } - if (parametersBuilder_ == null) { - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - } else { - parametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000100); - viewOriginalText_ = ""; - bitField0_ = (bitField0_ & ~0x00000200); - viewExpandedText_ = ""; - bitField0_ = (bitField0_ & ~0x00000400); - tableType_ = ""; - bitField0_ = (bitField0_ & ~0x00000800); - if (privilegesBuilder_ == null) { - privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); - } else { - privilegesBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00001000); - isTemporary_ = false; - bitField0_ = (bitField0_ & ~0x00002000); - isRewriteEnabled_ = false; - bitField0_ = (bitField0_ & ~0x00004000); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.owner_ = owner_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.createTime_ = createTime_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - 
to_bitField0_ |= 0x00000004; - } - result.lastAccessTime_ = lastAccessTime_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.retention_ = retention_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.location_ = location_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - if (sdParametersBuilder_ == null) { - result.sdParameters_ = sdParameters_; - } else { - result.sdParameters_ = sdParametersBuilder_.build(); - } - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; - } - result.sdHash_ = sdHash_; - if (partitionKeysBuilder_ == null) { - if (((bitField0_ & 0x00000080) == 0x00000080)) { - partitionKeys_ = java.util.Collections.unmodifiableList(partitionKeys_); - bitField0_ = (bitField0_ & ~0x00000080); - } - result.partitionKeys_ = partitionKeys_; - } else { - result.partitionKeys_ = partitionKeysBuilder_.build(); - } - if (((from_bitField0_ & 0x00000100) == 0x00000100)) { - to_bitField0_ |= 0x00000080; - } - if (parametersBuilder_ == null) { - result.parameters_ = parameters_; - } else { - result.parameters_ = parametersBuilder_.build(); - } - if (((from_bitField0_ & 0x00000200) == 0x00000200)) { - to_bitField0_ |= 0x00000100; - } - result.viewOriginalText_ = viewOriginalText_; - if (((from_bitField0_ & 0x00000400) == 0x00000400)) { - to_bitField0_ |= 0x00000200; - } - result.viewExpandedText_ = viewExpandedText_; - if (((from_bitField0_ & 0x00000800) == 0x00000800)) { - to_bitField0_ |= 0x00000400; - } - result.tableType_ = tableType_; - if (((from_bitField0_ & 0x00001000) == 0x00001000)) { - to_bitField0_ |= 0x00000800; - } - if (privilegesBuilder_ == null) { - result.privileges_ = privileges_; - } else { - result.privileges_ = privilegesBuilder_.build(); - } - if (((from_bitField0_ & 0x00002000) == 0x00002000)) { - to_bitField0_ |= 0x00001000; - } - result.isTemporary_ = isTemporary_; - if (((from_bitField0_ & 0x00004000) == 0x00004000)) { - to_bitField0_ |= 0x00002000; - } - result.isRewriteEnabled_ = isRewriteEnabled_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.getDefaultInstance()) return this; - if (other.hasOwner()) { - bitField0_ |= 0x00000001; - owner_ = other.owner_; - onChanged(); - } - if (other.hasCreateTime()) { - setCreateTime(other.getCreateTime()); - } - if (other.hasLastAccessTime()) { - setLastAccessTime(other.getLastAccessTime()); - } - if (other.hasRetention()) { - setRetention(other.getRetention()); - } - if (other.hasLocation()) { - bitField0_ |= 0x00000010; - location_ = other.location_; - onChanged(); - } - if (other.hasSdParameters()) { - mergeSdParameters(other.getSdParameters()); - } - if (other.hasSdHash()) { - setSdHash(other.getSdHash()); - } - if (partitionKeysBuilder_ == null) { - if (!other.partitionKeys_.isEmpty()) { - if (partitionKeys_.isEmpty()) { - partitionKeys_ = other.partitionKeys_; - bitField0_ = (bitField0_ & ~0x00000080); - } else { - 
ensurePartitionKeysIsMutable(); - partitionKeys_.addAll(other.partitionKeys_); - } - onChanged(); - } - } else { - if (!other.partitionKeys_.isEmpty()) { - if (partitionKeysBuilder_.isEmpty()) { - partitionKeysBuilder_.dispose(); - partitionKeysBuilder_ = null; - partitionKeys_ = other.partitionKeys_; - bitField0_ = (bitField0_ & ~0x00000080); - partitionKeysBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getPartitionKeysFieldBuilder() : null; - } else { - partitionKeysBuilder_.addAllMessages(other.partitionKeys_); - } - } - } - if (other.hasParameters()) { - mergeParameters(other.getParameters()); - } - if (other.hasViewOriginalText()) { - bitField0_ |= 0x00000200; - viewOriginalText_ = other.viewOriginalText_; - onChanged(); - } - if (other.hasViewExpandedText()) { - bitField0_ |= 0x00000400; - viewExpandedText_ = other.viewExpandedText_; - onChanged(); - } - if (other.hasTableType()) { - bitField0_ |= 0x00000800; - tableType_ = other.tableType_; - onChanged(); - } - if (other.hasPrivileges()) { - mergePrivileges(other.getPrivileges()); - } - if (other.hasIsTemporary()) { - setIsTemporary(other.getIsTemporary()); - } - if (other.hasIsRewriteEnabled()) { - setIsRewriteEnabled(other.getIsRewriteEnabled()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasSdHash()) { - - return false; - } - if (hasSdParameters()) { - if (!getSdParameters().isInitialized()) { - - return false; - } - } - for (int i = 0; i < getPartitionKeysCount(); i++) { - if (!getPartitionKeys(i).isInitialized()) { - - return false; - } - } - if (hasParameters()) { - if (!getParameters().isInitialized()) { - - return false; - } - } - if (hasPrivileges()) { - if (!getPrivileges().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional string owner = 1; - private java.lang.Object owner_ = ""; - /** - * optional string owner = 1; - */ - public boolean hasOwner() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional string owner = 1; - */ - public java.lang.String getOwner() { - java.lang.Object ref = owner_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - owner_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string owner = 1; - */ - public com.google.protobuf.ByteString - getOwnerBytes() { - java.lang.Object ref = owner_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - owner_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string owner = 1; - */ - public Builder setOwner( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - 
bitField0_ |= 0x00000001; - owner_ = value; - onChanged(); - return this; - } - /** - * optional string owner = 1; - */ - public Builder clearOwner() { - bitField0_ = (bitField0_ & ~0x00000001); - owner_ = getDefaultInstance().getOwner(); - onChanged(); - return this; - } - /** - * optional string owner = 1; - */ - public Builder setOwnerBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - owner_ = value; - onChanged(); - return this; - } - - // optional int64 create_time = 2; - private long createTime_ ; - /** - * optional int64 create_time = 2; - */ - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional int64 create_time = 2; - */ - public long getCreateTime() { - return createTime_; - } - /** - * optional int64 create_time = 2; - */ - public Builder setCreateTime(long value) { - bitField0_ |= 0x00000002; - createTime_ = value; - onChanged(); - return this; - } - /** - * optional int64 create_time = 2; - */ - public Builder clearCreateTime() { - bitField0_ = (bitField0_ & ~0x00000002); - createTime_ = 0L; - onChanged(); - return this; - } - - // optional int64 last_access_time = 3; - private long lastAccessTime_ ; - /** - * optional int64 last_access_time = 3; - */ - public boolean hasLastAccessTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional int64 last_access_time = 3; - */ - public long getLastAccessTime() { - return lastAccessTime_; - } - /** - * optional int64 last_access_time = 3; - */ - public Builder setLastAccessTime(long value) { - bitField0_ |= 0x00000004; - lastAccessTime_ = value; - onChanged(); - return this; - } - /** - * optional int64 last_access_time = 3; - */ - public Builder clearLastAccessTime() { - bitField0_ = (bitField0_ & ~0x00000004); - lastAccessTime_ = 0L; - onChanged(); - return this; - } - - // optional int64 retention = 4; - private long retention_ ; - /** - * optional int64 retention = 4; - */ - public boolean hasRetention() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional int64 retention = 4; - */ - public long getRetention() { - return retention_; - } - /** - * optional int64 retention = 4; - */ - public Builder setRetention(long value) { - bitField0_ |= 0x00000008; - retention_ = value; - onChanged(); - return this; - } - /** - * optional int64 retention = 4; - */ - public Builder clearRetention() { - bitField0_ = (bitField0_ & ~0x00000008); - retention_ = 0L; - onChanged(); - return this; - } - - // optional string location = 5; - private java.lang.Object location_ = ""; - /** - * optional string location = 5; - */ - public boolean hasLocation() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional string location = 5; - */ - public java.lang.String getLocation() { - java.lang.Object ref = location_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - location_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string location = 5; - */ - public com.google.protobuf.ByteString - getLocationBytes() { - java.lang.Object ref = location_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - location_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string location = 5; - */ - 
public Builder setLocation( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000010; - location_ = value; - onChanged(); - return this; - } - /** - * optional string location = 5; - */ - public Builder clearLocation() { - bitField0_ = (bitField0_ & ~0x00000010); - location_ = getDefaultInstance().getLocation(); - onChanged(); - return this; - } - /** - * optional string location = 5; - */ - public Builder setLocationBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000010; - location_ = value; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> sdParametersBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-       * storage descriptor parameters - *
- */ - public boolean hasSdParameters() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-       * storage descriptor parameters - *
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { - if (sdParametersBuilder_ == null) { - return sdParameters_; - } else { - return sdParametersBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-       * storage descriptor parameters - *
- */ - public Builder setSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (sdParametersBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - sdParameters_ = value; - onChanged(); - } else { - sdParametersBuilder_.setMessage(value); - } - bitField0_ |= 0x00000020; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-       * storage descriptor parameters - *
- */ - public Builder setSdParameters( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { - if (sdParametersBuilder_ == null) { - sdParameters_ = builderForValue.build(); - onChanged(); - } else { - sdParametersBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000020; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-       * storage descriptor parameters - *
- */ - public Builder mergeSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (sdParametersBuilder_ == null) { - if (((bitField0_ & 0x00000020) == 0x00000020) && - sdParameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { - sdParameters_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(sdParameters_).mergeFrom(value).buildPartial(); - } else { - sdParameters_ = value; - } - onChanged(); - } else { - sdParametersBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000020; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-       * storage descriptor parameters - *
- */ - public Builder clearSdParameters() { - if (sdParametersBuilder_ == null) { - sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - onChanged(); - } else { - sdParametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000020); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-       * storage descriptor parameters - *
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getSdParametersBuilder() { - bitField0_ |= 0x00000020; - onChanged(); - return getSdParametersFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-       * storage descriptor parameters - *
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { - if (sdParametersBuilder_ != null) { - return sdParametersBuilder_.getMessageOrBuilder(); - } else { - return sdParameters_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; - * - *
-       * storage descriptor parameters - *
- */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> - getSdParametersFieldBuilder() { - if (sdParametersBuilder_ == null) { - sdParametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( - sdParameters_, - getParentForChildren(), - isClean()); - sdParameters_ = null; - } - return sdParametersBuilder_; - } - - // required bytes sd_hash = 7; - private com.google.protobuf.ByteString sdHash_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes sd_hash = 7; - */ - public boolean hasSdHash() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * required bytes sd_hash = 7; - */ - public com.google.protobuf.ByteString getSdHash() { - return sdHash_; - } - /** - * required bytes sd_hash = 7; - */ - public Builder setSdHash(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000040; - sdHash_ = value; - onChanged(); - return this; - } - /** - * required bytes sd_hash = 7; - */ - public Builder clearSdHash() { - bitField0_ = (bitField0_ & ~0x00000040); - sdHash_ = getDefaultInstance().getSdHash(); - onChanged(); - return this; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - private java.util.List partitionKeys_ = - java.util.Collections.emptyList(); - private void ensurePartitionKeysIsMutable() { - if (!((bitField0_ & 0x00000080) == 0x00000080)) { - partitionKeys_ = new java.util.ArrayList(partitionKeys_); - bitField0_ |= 0x00000080; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder> partitionKeysBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public java.util.List getPartitionKeysList() { - if (partitionKeysBuilder_ == null) { - return java.util.Collections.unmodifiableList(partitionKeys_); - } else { - return partitionKeysBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public int getPartitionKeysCount() { - if (partitionKeysBuilder_ == null) { - return partitionKeys_.size(); - } else { - return partitionKeysBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getPartitionKeys(int index) { - if (partitionKeysBuilder_ == null) { - return partitionKeys_.get(index); - } else { - return partitionKeysBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public Builder setPartitionKeys( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { - if (partitionKeysBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - 
ensurePartitionKeysIsMutable(); - partitionKeys_.set(index, value); - onChanged(); - } else { - partitionKeysBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public Builder setPartitionKeys( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { - if (partitionKeysBuilder_ == null) { - ensurePartitionKeysIsMutable(); - partitionKeys_.set(index, builderForValue.build()); - onChanged(); - } else { - partitionKeysBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public Builder addPartitionKeys(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { - if (partitionKeysBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensurePartitionKeysIsMutable(); - partitionKeys_.add(value); - onChanged(); - } else { - partitionKeysBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public Builder addPartitionKeys( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { - if (partitionKeysBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensurePartitionKeysIsMutable(); - partitionKeys_.add(index, value); - onChanged(); - } else { - partitionKeysBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public Builder addPartitionKeys( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { - if (partitionKeysBuilder_ == null) { - ensurePartitionKeysIsMutable(); - partitionKeys_.add(builderForValue.build()); - onChanged(); - } else { - partitionKeysBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public Builder addPartitionKeys( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { - if (partitionKeysBuilder_ == null) { - ensurePartitionKeysIsMutable(); - partitionKeys_.add(index, builderForValue.build()); - onChanged(); - } else { - partitionKeysBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public Builder addAllPartitionKeys( - java.lang.Iterable values) { - if (partitionKeysBuilder_ == null) { - ensurePartitionKeysIsMutable(); - super.addAll(values, partitionKeys_); - onChanged(); - } else { - partitionKeysBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public Builder clearPartitionKeys() { - if (partitionKeysBuilder_ == null) { - partitionKeys_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000080); - onChanged(); - } else { - partitionKeysBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public Builder removePartitionKeys(int index) { - if (partitionKeysBuilder_ == null) { - ensurePartitionKeysIsMutable(); - partitionKeys_.remove(index); - 
onChanged(); - } else { - partitionKeysBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder getPartitionKeysBuilder( - int index) { - return getPartitionKeysFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getPartitionKeysOrBuilder( - int index) { - if (partitionKeysBuilder_ == null) { - return partitionKeys_.get(index); } else { - return partitionKeysBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public java.util.List - getPartitionKeysOrBuilderList() { - if (partitionKeysBuilder_ != null) { - return partitionKeysBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(partitionKeys_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder addPartitionKeysBuilder() { - return getPartitionKeysFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder addPartitionKeysBuilder( - int index) { - return getPartitionKeysFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; - */ - public java.util.List - getPartitionKeysBuilderList() { - return getPartitionKeysFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder> - getPartitionKeysFieldBuilder() { - if (partitionKeysBuilder_ == null) { - partitionKeysBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder>( - partitionKeys_, - ((bitField0_ & 0x00000080) == 0x00000080), - getParentForChildren(), - isClean()); - partitionKeys_ = null; - } - return partitionKeysBuilder_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - 
public boolean hasParameters() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { - if (parametersBuilder_ == null) { - return parameters_; - } else { - return parametersBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (parametersBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - parameters_ = value; - onChanged(); - } else { - parametersBuilder_.setMessage(value); - } - bitField0_ |= 0x00000100; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - public Builder setParameters( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { - if (parametersBuilder_ == null) { - parameters_ = builderForValue.build(); - onChanged(); - } else { - parametersBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000100; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (parametersBuilder_ == null) { - if (((bitField0_ & 0x00000100) == 0x00000100) && - parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { - parameters_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); - } else { - parameters_ = value; - } - onChanged(); - } else { - parametersBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000100; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - public Builder clearParameters() { - if (parametersBuilder_ == null) { - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - onChanged(); - } else { - parametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000100); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { - bitField0_ |= 0x00000100; - onChanged(); - return getParametersFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { - if (parametersBuilder_ != null) { - return parametersBuilder_.getMessageOrBuilder(); - } else { - return parameters_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> - getParametersFieldBuilder() { - if (parametersBuilder_ == null) { - parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< - 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( - parameters_, - getParentForChildren(), - isClean()); - parameters_ = null; - } - return parametersBuilder_; - } - - // optional string view_original_text = 10; - private java.lang.Object viewOriginalText_ = ""; - /** - * optional string view_original_text = 10; - */ - public boolean hasViewOriginalText() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - /** - * optional string view_original_text = 10; - */ - public java.lang.String getViewOriginalText() { - java.lang.Object ref = viewOriginalText_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - viewOriginalText_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string view_original_text = 10; - */ - public com.google.protobuf.ByteString - getViewOriginalTextBytes() { - java.lang.Object ref = viewOriginalText_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - viewOriginalText_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string view_original_text = 10; - */ - public Builder setViewOriginalText( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000200; - viewOriginalText_ = value; - onChanged(); - return this; - } - /** - * optional string view_original_text = 10; - */ - public Builder clearViewOriginalText() { - bitField0_ = (bitField0_ & ~0x00000200); - viewOriginalText_ = getDefaultInstance().getViewOriginalText(); - onChanged(); - return this; - } - /** - * optional string view_original_text = 10; - */ - public Builder setViewOriginalTextBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000200; - viewOriginalText_ = value; - onChanged(); - return this; - } - - // optional string view_expanded_text = 11; - private java.lang.Object viewExpandedText_ = ""; - /** - * optional string view_expanded_text = 11; - */ - public boolean hasViewExpandedText() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - /** - * optional string view_expanded_text = 11; - */ - public java.lang.String getViewExpandedText() { - java.lang.Object ref = viewExpandedText_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - viewExpandedText_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string view_expanded_text = 11; - */ - public com.google.protobuf.ByteString - getViewExpandedTextBytes() { - java.lang.Object ref = viewExpandedText_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - viewExpandedText_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string view_expanded_text = 11; - */ - public Builder setViewExpandedText( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000400; - viewExpandedText_ = value; - onChanged(); - return this; - } - /** - * optional string view_expanded_text = 11; - */ 
- public Builder clearViewExpandedText() { - bitField0_ = (bitField0_ & ~0x00000400); - viewExpandedText_ = getDefaultInstance().getViewExpandedText(); - onChanged(); - return this; - } - /** - * optional string view_expanded_text = 11; - */ - public Builder setViewExpandedTextBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000400; - viewExpandedText_ = value; - onChanged(); - return this; - } - - // optional string table_type = 12; - private java.lang.Object tableType_ = ""; - /** - * optional string table_type = 12; - */ - public boolean hasTableType() { - return ((bitField0_ & 0x00000800) == 0x00000800); - } - /** - * optional string table_type = 12; - */ - public java.lang.String getTableType() { - java.lang.Object ref = tableType_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - tableType_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string table_type = 12; - */ - public com.google.protobuf.ByteString - getTableTypeBytes() { - java.lang.Object ref = tableType_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - tableType_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string table_type = 12; - */ - public Builder setTableType( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000800; - tableType_ = value; - onChanged(); - return this; - } - /** - * optional string table_type = 12; - */ - public Builder clearTableType() { - bitField0_ = (bitField0_ & ~0x00000800); - tableType_ = getDefaultInstance().getTableType(); - onChanged(); - return this; - } - /** - * optional string table_type = 12; - */ - public Builder setTableTypeBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000800; - tableType_ = value; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> privilegesBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public boolean hasPrivileges() { - return ((bitField0_ & 0x00001000) == 0x00001000); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { - if (privilegesBuilder_ == null) { - return privileges_; - } else { - return privilegesBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public Builder setPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { - 
if (privilegesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - privileges_ = value; - onChanged(); - } else { - privilegesBuilder_.setMessage(value); - } - bitField0_ |= 0x00001000; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public Builder setPrivileges( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder builderForValue) { - if (privilegesBuilder_ == null) { - privileges_ = builderForValue.build(); - onChanged(); - } else { - privilegesBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00001000; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public Builder mergePrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { - if (privilegesBuilder_ == null) { - if (((bitField0_ & 0x00001000) == 0x00001000) && - privileges_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) { - privileges_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(privileges_).mergeFrom(value).buildPartial(); - } else { - privileges_ = value; - } - onChanged(); - } else { - privilegesBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00001000; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public Builder clearPrivileges() { - if (privilegesBuilder_ == null) { - privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); - onChanged(); - } else { - privilegesBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00001000); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder getPrivilegesBuilder() { - bitField0_ |= 0x00001000; - onChanged(); - return getPrivilegesFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { - if (privilegesBuilder_ != null) { - return privilegesBuilder_.getMessageOrBuilder(); - } else { - return privileges_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> - getPrivilegesFieldBuilder() { - if (privilegesBuilder_ == null) { - privilegesBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder>( - privileges_, - getParentForChildren(), - isClean()); - privileges_ = null; - } - return privilegesBuilder_; - } - - // optional bool is_temporary = 14; - private boolean isTemporary_ ; - /** - * 
optional bool is_temporary = 14; - */ - public boolean hasIsTemporary() { - return ((bitField0_ & 0x00002000) == 0x00002000); - } - /** - * optional bool is_temporary = 14; - */ - public boolean getIsTemporary() { - return isTemporary_; - } - /** - * optional bool is_temporary = 14; - */ - public Builder setIsTemporary(boolean value) { - bitField0_ |= 0x00002000; - isTemporary_ = value; - onChanged(); - return this; - } - /** - * optional bool is_temporary = 14; - */ - public Builder clearIsTemporary() { - bitField0_ = (bitField0_ & ~0x00002000); - isTemporary_ = false; - onChanged(); - return this; - } - - // optional bool is_rewrite_enabled = 15; - private boolean isRewriteEnabled_ ; - /** - * optional bool is_rewrite_enabled = 15; - */ - public boolean hasIsRewriteEnabled() { - return ((bitField0_ & 0x00004000) == 0x00004000); - } - /** - * optional bool is_rewrite_enabled = 15; - */ - public boolean getIsRewriteEnabled() { - return isRewriteEnabled_; - } - /** - * optional bool is_rewrite_enabled = 15; - */ - public Builder setIsRewriteEnabled(boolean value) { - bitField0_ |= 0x00004000; - isRewriteEnabled_ = value; - onChanged(); - return this; - } - /** - * optional bool is_rewrite_enabled = 15; - */ - public Builder clearIsRewriteEnabled() { - bitField0_ = (bitField0_ & ~0x00004000); - isRewriteEnabled_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Table) - } - - static { - defaultInstance = new Table(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Table) - } - - public interface IndexOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional string indexHandlerClass = 1; - /** - * optional string indexHandlerClass = 1; - * - *
-     * reserved
-     * </pre>
- */ - boolean hasIndexHandlerClass(); - /** - * optional string indexHandlerClass = 1; - * - *
-     * reserved
-     * </pre>
- */ - java.lang.String getIndexHandlerClass(); - /** - * optional string indexHandlerClass = 1; - * - *
-     * reserved
-     * </pre>
- */ - com.google.protobuf.ByteString - getIndexHandlerClassBytes(); - - // required string dbName = 2; - /** - * required string dbName = 2; - */ - boolean hasDbName(); - /** - * required string dbName = 2; - */ - java.lang.String getDbName(); - /** - * required string dbName = 2; - */ - com.google.protobuf.ByteString - getDbNameBytes(); - - // required string origTableName = 3; - /** - * required string origTableName = 3; - */ - boolean hasOrigTableName(); - /** - * required string origTableName = 3; - */ - java.lang.String getOrigTableName(); - /** - * required string origTableName = 3; - */ - com.google.protobuf.ByteString - getOrigTableNameBytes(); - - // optional string location = 4; - /** - * optional string location = 4; - */ - boolean hasLocation(); - /** - * optional string location = 4; - */ - java.lang.String getLocation(); - /** - * optional string location = 4; - */ - com.google.protobuf.ByteString - getLocationBytes(); - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-     * storage descriptor parameters
-     * </pre>
- */ - boolean hasSdParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-     * storage descriptor parameters
-     * </pre>
- */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-     * storage descriptor parameters
-     * </pre>
- */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder(); - - // optional int32 createTime = 6; - /** - * optional int32 createTime = 6; - */ - boolean hasCreateTime(); - /** - * optional int32 createTime = 6; - */ - int getCreateTime(); - - // optional int32 lastAccessTime = 7; - /** - * optional int32 lastAccessTime = 7; - */ - boolean hasLastAccessTime(); - /** - * optional int32 lastAccessTime = 7; - */ - int getLastAccessTime(); - - // optional string indexTableName = 8; - /** - * optional string indexTableName = 8; - */ - boolean hasIndexTableName(); - /** - * optional string indexTableName = 8; - */ - java.lang.String getIndexTableName(); - /** - * optional string indexTableName = 8; - */ - com.google.protobuf.ByteString - getIndexTableNameBytes(); - - // optional bytes sd_hash = 9; - /** - * optional bytes sd_hash = 9; - */ - boolean hasSdHash(); - /** - * optional bytes sd_hash = 9; - */ - com.google.protobuf.ByteString getSdHash(); - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - boolean hasParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); - - // optional bool deferredRebuild = 11; - /** - * optional bool deferredRebuild = 11; - */ - boolean hasDeferredRebuild(); - /** - * optional bool deferredRebuild = 11; - */ - boolean getDeferredRebuild(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Index} - */ - public static final class Index extends - com.google.protobuf.GeneratedMessage - implements IndexOrBuilder { - // Use Index.newBuilder() to construct. 
- private Index(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Index(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Index defaultInstance; - public static Index getDefaultInstance() { - return defaultInstance; - } - - public Index getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Index( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - indexHandlerClass_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - dbName_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - origTableName_ = input.readBytes(); - break; - } - case 34: { - bitField0_ |= 0x00000008; - location_ = input.readBytes(); - break; - } - case 42: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; - if (((bitField0_ & 0x00000010) == 0x00000010)) { - subBuilder = sdParameters_.toBuilder(); - } - sdParameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(sdParameters_); - sdParameters_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000010; - break; - } - case 48: { - bitField0_ |= 0x00000020; - createTime_ = input.readInt32(); - break; - } - case 56: { - bitField0_ |= 0x00000040; - lastAccessTime_ = input.readInt32(); - break; - } - case 66: { - bitField0_ |= 0x00000080; - indexTableName_ = input.readBytes(); - break; - } - case 74: { - bitField0_ |= 0x00000100; - sdHash_ = input.readBytes(); - break; - } - case 82: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; - if (((bitField0_ & 0x00000200) == 0x00000200)) { - subBuilder = parameters_.toBuilder(); - } - parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(parameters_); - parameters_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000200; - break; - } - case 88: { - bitField0_ |= 0x00000400; - deferredRebuild_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Index_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Index parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Index(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional string indexHandlerClass = 1; - public static final int INDEXHANDLERCLASS_FIELD_NUMBER = 1; - private java.lang.Object indexHandlerClass_; - /** - * optional string indexHandlerClass = 1; - * - *
-     * reserved
-     * </pre>
- */ - public boolean hasIndexHandlerClass() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional string indexHandlerClass = 1; - * - *
-     * reserved
-     * </pre>
- */ - public java.lang.String getIndexHandlerClass() { - java.lang.Object ref = indexHandlerClass_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - indexHandlerClass_ = s; - } - return s; - } - } - /** - * optional string indexHandlerClass = 1; - * - *
-     * reserved
-     * </pre>
- */ - public com.google.protobuf.ByteString - getIndexHandlerClassBytes() { - java.lang.Object ref = indexHandlerClass_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - indexHandlerClass_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string dbName = 2; - public static final int DBNAME_FIELD_NUMBER = 2; - private java.lang.Object dbName_; - /** - * required string dbName = 2; - */ - public boolean hasDbName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string dbName = 2; - */ - public java.lang.String getDbName() { - java.lang.Object ref = dbName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - dbName_ = s; - } - return s; - } - } - /** - * required string dbName = 2; - */ - public com.google.protobuf.ByteString - getDbNameBytes() { - java.lang.Object ref = dbName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - dbName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string origTableName = 3; - public static final int ORIGTABLENAME_FIELD_NUMBER = 3; - private java.lang.Object origTableName_; - /** - * required string origTableName = 3; - */ - public boolean hasOrigTableName() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required string origTableName = 3; - */ - public java.lang.String getOrigTableName() { - java.lang.Object ref = origTableName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - origTableName_ = s; - } - return s; - } - } - /** - * required string origTableName = 3; - */ - public com.google.protobuf.ByteString - getOrigTableNameBytes() { - java.lang.Object ref = origTableName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - origTableName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string location = 4; - public static final int LOCATION_FIELD_NUMBER = 4; - private java.lang.Object location_; - /** - * optional string location = 4; - */ - public boolean hasLocation() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional string location = 4; - */ - public java.lang.String getLocation() { - java.lang.Object ref = location_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - location_ = s; - } - return s; - } - } - /** - * optional string location = 4; - */ - public com.google.protobuf.ByteString - getLocationBytes() { - java.lang.Object ref = location_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - location_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - 
} - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - public static final int SD_PARAMETERS_FIELD_NUMBER = 5; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-     * storage descriptor parameters
-     * </pre>
- */ - public boolean hasSdParameters() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-     * storage descriptor parameters
-     * </pre>
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { - return sdParameters_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-     * storage descriptor parameters
-     * </pre>
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { - return sdParameters_; - } - - // optional int32 createTime = 6; - public static final int CREATETIME_FIELD_NUMBER = 6; - private int createTime_; - /** - * optional int32 createTime = 6; - */ - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional int32 createTime = 6; - */ - public int getCreateTime() { - return createTime_; - } - - // optional int32 lastAccessTime = 7; - public static final int LASTACCESSTIME_FIELD_NUMBER = 7; - private int lastAccessTime_; - /** - * optional int32 lastAccessTime = 7; - */ - public boolean hasLastAccessTime() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional int32 lastAccessTime = 7; - */ - public int getLastAccessTime() { - return lastAccessTime_; - } - - // optional string indexTableName = 8; - public static final int INDEXTABLENAME_FIELD_NUMBER = 8; - private java.lang.Object indexTableName_; - /** - * optional string indexTableName = 8; - */ - public boolean hasIndexTableName() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional string indexTableName = 8; - */ - public java.lang.String getIndexTableName() { - java.lang.Object ref = indexTableName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - indexTableName_ = s; - } - return s; - } - } - /** - * optional string indexTableName = 8; - */ - public com.google.protobuf.ByteString - getIndexTableNameBytes() { - java.lang.Object ref = indexTableName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - indexTableName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional bytes sd_hash = 9; - public static final int SD_HASH_FIELD_NUMBER = 9; - private com.google.protobuf.ByteString sdHash_; - /** - * optional bytes sd_hash = 9; - */ - public boolean hasSdHash() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - * optional bytes sd_hash = 9; - */ - public com.google.protobuf.ByteString getSdHash() { - return sdHash_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - public static final int PARAMETERS_FIELD_NUMBER = 10; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - public boolean hasParameters() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { - return parameters_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { - return parameters_; - } - - // optional bool deferredRebuild = 11; - public static final int DEFERREDREBUILD_FIELD_NUMBER = 11; - private boolean deferredRebuild_; - /** - * optional bool deferredRebuild = 11; - */ - public boolean hasDeferredRebuild() { - return ((bitField0_ & 0x00000400) == 
0x00000400); - } - /** - * optional bool deferredRebuild = 11; - */ - public boolean getDeferredRebuild() { - return deferredRebuild_; - } - - private void initFields() { - indexHandlerClass_ = ""; - dbName_ = ""; - origTableName_ = ""; - location_ = ""; - sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - createTime_ = 0; - lastAccessTime_ = 0; - indexTableName_ = ""; - sdHash_ = com.google.protobuf.ByteString.EMPTY; - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - deferredRebuild_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasDbName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasOrigTableName()) { - memoizedIsInitialized = 0; - return false; - } - if (hasSdParameters()) { - if (!getSdParameters().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasParameters()) { - if (!getParameters().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getIndexHandlerClassBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getDbNameBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getOrigTableNameBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBytes(4, getLocationBytes()); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeMessage(5, sdParameters_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeInt32(6, createTime_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeInt32(7, lastAccessTime_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeBytes(8, getIndexTableNameBytes()); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeBytes(9, sdHash_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - output.writeMessage(10, parameters_); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - output.writeBool(11, deferredRebuild_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getIndexHandlerClassBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getDbNameBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getOrigTableNameBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(4, getLocationBytes()); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(5, sdParameters_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(6, createTime_); - } - if 
(((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(7, lastAccessTime_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(8, getIndexTableNameBytes()); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(9, sdHash_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(10, parameters_); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(11, deferredRebuild_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return 
Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Index} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.IndexOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Index_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getSdParametersFieldBuilder(); - getParametersFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - indexHandlerClass_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - dbName_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - origTableName_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - location_ = ""; - bitField0_ = (bitField0_ & ~0x00000008); - if (sdParametersBuilder_ == null) { - sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - } else { - sdParametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000010); - createTime_ = 0; - bitField0_ = (bitField0_ & ~0x00000020); - lastAccessTime_ = 0; - bitField0_ = (bitField0_ & ~0x00000040); - indexTableName_ = ""; - bitField0_ = (bitField0_ & ~0x00000080); - sdHash_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000100); - if (parametersBuilder_ == null) { - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - } else { - parametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000200); - deferredRebuild_ = false; - bitField0_ = (bitField0_ & ~0x00000400); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index getDefaultInstanceForType() { - 
return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.indexHandlerClass_ = indexHandlerClass_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.dbName_ = dbName_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.origTableName_ = origTableName_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.location_ = location_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - if (sdParametersBuilder_ == null) { - result.sdParameters_ = sdParameters_; - } else { - result.sdParameters_ = sdParametersBuilder_.build(); - } - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.createTime_ = createTime_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; - } - result.lastAccessTime_ = lastAccessTime_; - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000080; - } - result.indexTableName_ = indexTableName_; - if (((from_bitField0_ & 0x00000100) == 0x00000100)) { - to_bitField0_ |= 0x00000100; - } - result.sdHash_ = sdHash_; - if (((from_bitField0_ & 0x00000200) == 0x00000200)) { - to_bitField0_ |= 0x00000200; - } - if (parametersBuilder_ == null) { - result.parameters_ = parameters_; - } else { - result.parameters_ = parametersBuilder_.build(); - } - if (((from_bitField0_ & 0x00000400) == 0x00000400)) { - to_bitField0_ |= 0x00000400; - } - result.deferredRebuild_ = deferredRebuild_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.getDefaultInstance()) return this; - if (other.hasIndexHandlerClass()) { - bitField0_ |= 0x00000001; - indexHandlerClass_ = other.indexHandlerClass_; - onChanged(); - } - if (other.hasDbName()) { - bitField0_ |= 0x00000002; - dbName_ = other.dbName_; - onChanged(); - } - if (other.hasOrigTableName()) { - bitField0_ |= 0x00000004; - origTableName_ = other.origTableName_; - onChanged(); - } - if (other.hasLocation()) { - bitField0_ |= 0x00000008; - location_ = other.location_; - onChanged(); - } - if (other.hasSdParameters()) { - mergeSdParameters(other.getSdParameters()); - } - if (other.hasCreateTime()) { - setCreateTime(other.getCreateTime()); - } - if 
(other.hasLastAccessTime()) { - setLastAccessTime(other.getLastAccessTime()); - } - if (other.hasIndexTableName()) { - bitField0_ |= 0x00000080; - indexTableName_ = other.indexTableName_; - onChanged(); - } - if (other.hasSdHash()) { - setSdHash(other.getSdHash()); - } - if (other.hasParameters()) { - mergeParameters(other.getParameters()); - } - if (other.hasDeferredRebuild()) { - setDeferredRebuild(other.getDeferredRebuild()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasDbName()) { - - return false; - } - if (!hasOrigTableName()) { - - return false; - } - if (hasSdParameters()) { - if (!getSdParameters().isInitialized()) { - - return false; - } - } - if (hasParameters()) { - if (!getParameters().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional string indexHandlerClass = 1; - private java.lang.Object indexHandlerClass_ = ""; - /** - * optional string indexHandlerClass = 1; - * - *
-       * reserved
-       * </pre>
- */ - public boolean hasIndexHandlerClass() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional string indexHandlerClass = 1; - * - *
-       * reserved
-       * </pre>
- */ - public java.lang.String getIndexHandlerClass() { - java.lang.Object ref = indexHandlerClass_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - indexHandlerClass_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string indexHandlerClass = 1; - * - *
-       * reserved
-       * </pre>
- */ - public com.google.protobuf.ByteString - getIndexHandlerClassBytes() { - java.lang.Object ref = indexHandlerClass_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - indexHandlerClass_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string indexHandlerClass = 1; - * - *
-       * reserved
-       * </pre>
- */ - public Builder setIndexHandlerClass( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - indexHandlerClass_ = value; - onChanged(); - return this; - } - /** - * optional string indexHandlerClass = 1; - * - *
-       * reserved
-       * </pre>
- */ - public Builder clearIndexHandlerClass() { - bitField0_ = (bitField0_ & ~0x00000001); - indexHandlerClass_ = getDefaultInstance().getIndexHandlerClass(); - onChanged(); - return this; - } - /** - * optional string indexHandlerClass = 1; - * - *
-       * reserved
-       * </pre>
- */ - public Builder setIndexHandlerClassBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - indexHandlerClass_ = value; - onChanged(); - return this; - } - - // required string dbName = 2; - private java.lang.Object dbName_ = ""; - /** - * required string dbName = 2; - */ - public boolean hasDbName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string dbName = 2; - */ - public java.lang.String getDbName() { - java.lang.Object ref = dbName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - dbName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string dbName = 2; - */ - public com.google.protobuf.ByteString - getDbNameBytes() { - java.lang.Object ref = dbName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - dbName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string dbName = 2; - */ - public Builder setDbName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - dbName_ = value; - onChanged(); - return this; - } - /** - * required string dbName = 2; - */ - public Builder clearDbName() { - bitField0_ = (bitField0_ & ~0x00000002); - dbName_ = getDefaultInstance().getDbName(); - onChanged(); - return this; - } - /** - * required string dbName = 2; - */ - public Builder setDbNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - dbName_ = value; - onChanged(); - return this; - } - - // required string origTableName = 3; - private java.lang.Object origTableName_ = ""; - /** - * required string origTableName = 3; - */ - public boolean hasOrigTableName() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required string origTableName = 3; - */ - public java.lang.String getOrigTableName() { - java.lang.Object ref = origTableName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - origTableName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string origTableName = 3; - */ - public com.google.protobuf.ByteString - getOrigTableNameBytes() { - java.lang.Object ref = origTableName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - origTableName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string origTableName = 3; - */ - public Builder setOrigTableName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - origTableName_ = value; - onChanged(); - return this; - } - /** - * required string origTableName = 3; - */ - public Builder clearOrigTableName() { - bitField0_ = (bitField0_ & ~0x00000004); - origTableName_ = getDefaultInstance().getOrigTableName(); - onChanged(); - return this; - } - /** - * required string origTableName = 3; - */ - public Builder setOrigTableNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - 
origTableName_ = value; - onChanged(); - return this; - } - - // optional string location = 4; - private java.lang.Object location_ = ""; - /** - * optional string location = 4; - */ - public boolean hasLocation() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional string location = 4; - */ - public java.lang.String getLocation() { - java.lang.Object ref = location_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - location_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string location = 4; - */ - public com.google.protobuf.ByteString - getLocationBytes() { - java.lang.Object ref = location_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - location_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string location = 4; - */ - public Builder setLocation( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - location_ = value; - onChanged(); - return this; - } - /** - * optional string location = 4; - */ - public Builder clearLocation() { - bitField0_ = (bitField0_ & ~0x00000008); - location_ = getDefaultInstance().getLocation(); - onChanged(); - return this; - } - /** - * optional string location = 4; - */ - public Builder setLocationBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - location_ = value; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> sdParametersBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-       * storage descriptor parameters
-       * </pre>
- */ - public boolean hasSdParameters() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-       * storage descriptor parameters
-       * </pre>
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { - if (sdParametersBuilder_ == null) { - return sdParameters_; - } else { - return sdParametersBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-       * storage descriptor parameters
-       * </pre>
- */ - public Builder setSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (sdParametersBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - sdParameters_ = value; - onChanged(); - } else { - sdParametersBuilder_.setMessage(value); - } - bitField0_ |= 0x00000010; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-       * storage descriptor parameters
-       * </pre>
- */ - public Builder setSdParameters( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { - if (sdParametersBuilder_ == null) { - sdParameters_ = builderForValue.build(); - onChanged(); - } else { - sdParametersBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000010; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-       * storage descriptor parameters
-       * </pre>
- */ - public Builder mergeSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (sdParametersBuilder_ == null) { - if (((bitField0_ & 0x00000010) == 0x00000010) && - sdParameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { - sdParameters_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(sdParameters_).mergeFrom(value).buildPartial(); - } else { - sdParameters_ = value; - } - onChanged(); - } else { - sdParametersBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000010; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-       * storage descriptor parameters
-       * </pre>
- */ - public Builder clearSdParameters() { - if (sdParametersBuilder_ == null) { - sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - onChanged(); - } else { - sdParametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-       * storage descriptor parameters
-       * </pre>
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getSdParametersBuilder() { - bitField0_ |= 0x00000010; - onChanged(); - return getSdParametersFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-       * storage descriptor parameters
-       * </pre>
- */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { - if (sdParametersBuilder_ != null) { - return sdParametersBuilder_.getMessageOrBuilder(); - } else { - return sdParameters_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; - * - *
-       * storage descriptor parameters
-       * </pre>
- */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> - getSdParametersFieldBuilder() { - if (sdParametersBuilder_ == null) { - sdParametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( - sdParameters_, - getParentForChildren(), - isClean()); - sdParameters_ = null; - } - return sdParametersBuilder_; - } - - // optional int32 createTime = 6; - private int createTime_ ; - /** - * optional int32 createTime = 6; - */ - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional int32 createTime = 6; - */ - public int getCreateTime() { - return createTime_; - } - /** - * optional int32 createTime = 6; - */ - public Builder setCreateTime(int value) { - bitField0_ |= 0x00000020; - createTime_ = value; - onChanged(); - return this; - } - /** - * optional int32 createTime = 6; - */ - public Builder clearCreateTime() { - bitField0_ = (bitField0_ & ~0x00000020); - createTime_ = 0; - onChanged(); - return this; - } - - // optional int32 lastAccessTime = 7; - private int lastAccessTime_ ; - /** - * optional int32 lastAccessTime = 7; - */ - public boolean hasLastAccessTime() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional int32 lastAccessTime = 7; - */ - public int getLastAccessTime() { - return lastAccessTime_; - } - /** - * optional int32 lastAccessTime = 7; - */ - public Builder setLastAccessTime(int value) { - bitField0_ |= 0x00000040; - lastAccessTime_ = value; - onChanged(); - return this; - } - /** - * optional int32 lastAccessTime = 7; - */ - public Builder clearLastAccessTime() { - bitField0_ = (bitField0_ & ~0x00000040); - lastAccessTime_ = 0; - onChanged(); - return this; - } - - // optional string indexTableName = 8; - private java.lang.Object indexTableName_ = ""; - /** - * optional string indexTableName = 8; - */ - public boolean hasIndexTableName() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional string indexTableName = 8; - */ - public java.lang.String getIndexTableName() { - java.lang.Object ref = indexTableName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - indexTableName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string indexTableName = 8; - */ - public com.google.protobuf.ByteString - getIndexTableNameBytes() { - java.lang.Object ref = indexTableName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - indexTableName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string indexTableName = 8; - */ - public Builder setIndexTableName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000080; - indexTableName_ = value; - onChanged(); - return this; - } - /** - * optional string indexTableName = 8; - */ - public Builder clearIndexTableName() { - bitField0_ = (bitField0_ & ~0x00000080); - 
indexTableName_ = getDefaultInstance().getIndexTableName(); - onChanged(); - return this; - } - /** - * optional string indexTableName = 8; - */ - public Builder setIndexTableNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000080; - indexTableName_ = value; - onChanged(); - return this; - } - - // optional bytes sd_hash = 9; - private com.google.protobuf.ByteString sdHash_ = com.google.protobuf.ByteString.EMPTY; - /** - * optional bytes sd_hash = 9; - */ - public boolean hasSdHash() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - * optional bytes sd_hash = 9; - */ - public com.google.protobuf.ByteString getSdHash() { - return sdHash_; - } - /** - * optional bytes sd_hash = 9; - */ - public Builder setSdHash(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000100; - sdHash_ = value; - onChanged(); - return this; - } - /** - * optional bytes sd_hash = 9; - */ - public Builder clearSdHash() { - bitField0_ = (bitField0_ & ~0x00000100); - sdHash_ = getDefaultInstance().getSdHash(); - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - public boolean hasParameters() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { - if (parametersBuilder_ == null) { - return parameters_; - } else { - return parametersBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (parametersBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - parameters_ = value; - onChanged(); - } else { - parametersBuilder_.setMessage(value); - } - bitField0_ |= 0x00000200; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - public Builder setParameters( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { - if (parametersBuilder_ == null) { - parameters_ = builderForValue.build(); - onChanged(); - } else { - parametersBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000200; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { - if (parametersBuilder_ == null) { - if (((bitField0_ & 0x00000200) == 0x00000200) && - parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { - 
parameters_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); - } else { - parameters_ = value; - } - onChanged(); - } else { - parametersBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000200; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - public Builder clearParameters() { - if (parametersBuilder_ == null) { - parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - onChanged(); - } else { - parametersBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000200); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { - bitField0_ |= 0x00000200; - onChanged(); - return getParametersFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { - if (parametersBuilder_ != null) { - return parametersBuilder_.getMessageOrBuilder(); - } else { - return parameters_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> - getParametersFieldBuilder() { - if (parametersBuilder_ == null) { - parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( - parameters_, - getParentForChildren(), - isClean()); - parameters_ = null; - } - return parametersBuilder_; - } - - // optional bool deferredRebuild = 11; - private boolean deferredRebuild_ ; - /** - * optional bool deferredRebuild = 11; - */ - public boolean hasDeferredRebuild() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - /** - * optional bool deferredRebuild = 11; - */ - public boolean getDeferredRebuild() { - return deferredRebuild_; - } - /** - * optional bool deferredRebuild = 11; - */ - public Builder setDeferredRebuild(boolean value) { - bitField0_ |= 0x00000400; - deferredRebuild_ = value; - onChanged(); - return this; - } - /** - * optional bool deferredRebuild = 11; - */ - public Builder clearDeferredRebuild() { - bitField0_ = (bitField0_ & ~0x00000400); - deferredRebuild_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Index) - } - - static { - defaultInstance = new Index(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Index) - } - - public interface PartitionKeyComparatorOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string names = 1; - /** - * required string names = 1; - */ - boolean hasNames(); - /** - * required string names = 1; - */ - java.lang.String getNames(); - /** - * required string names = 1; - */ - com.google.protobuf.ByteString - 
getNamesBytes(); - - // required string types = 2; - /** - * required string types = 2; - */ - boolean hasTypes(); - /** - * required string types = 2; - */ - java.lang.String getTypes(); - /** - * required string types = 2; - */ - com.google.protobuf.ByteString - getTypesBytes(); - - // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - java.util.List - getOpList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator getOp(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - int getOpCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - java.util.List - getOpOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder getOpOrBuilder( - int index); - - // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - java.util.List - getRangeList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range getRange(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - int getRangeCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - java.util.List - getRangeOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder getRangeOrBuilder( - int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator} - */ - public static final class PartitionKeyComparator extends - com.google.protobuf.GeneratedMessage - implements PartitionKeyComparatorOrBuilder { - // Use PartitionKeyComparator.newBuilder() to construct. 
- private PartitionKeyComparator(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private PartitionKeyComparator(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final PartitionKeyComparator defaultInstance; - public static PartitionKeyComparator getDefaultInstance() { - return defaultInstance; - } - - public PartitionKeyComparator getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private PartitionKeyComparator( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - names_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - types_ = input.readBytes(); - break; - } - case 26: { - if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - op_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000004; - } - op_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.PARSER, extensionRegistry)); - break; - } - case 34: { - if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - range_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000008; - } - range_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - op_ = java.util.Collections.unmodifiableList(op_); - } - if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - range_ = java.util.Collections.unmodifiableList(range_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new 
com.google.protobuf.AbstractParser() { - public PartitionKeyComparator parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new PartitionKeyComparator(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface MarkOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string value = 1; - /** - * required string value = 1; - */ - boolean hasValue(); - /** - * required string value = 1; - */ - java.lang.String getValue(); - /** - * required string value = 1; - */ - com.google.protobuf.ByteString - getValueBytes(); - - // required bool inclusive = 2; - /** - * required bool inclusive = 2; - */ - boolean hasInclusive(); - /** - * required bool inclusive = 2; - */ - boolean getInclusive(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark} - */ - public static final class Mark extends - com.google.protobuf.GeneratedMessage - implements MarkOrBuilder { - // Use Mark.newBuilder() to construct. - private Mark(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Mark(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Mark defaultInstance; - public static Mark getDefaultInstance() { - return defaultInstance; - } - - public Mark getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Mark( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - value_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - inclusive_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_fieldAccessorTable - 
.ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Mark parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Mark(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string value = 1; - public static final int VALUE_FIELD_NUMBER = 1; - private java.lang.Object value_; - /** - * required string value = 1; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string value = 1; - */ - public java.lang.String getValue() { - java.lang.Object ref = value_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - value_ = s; - } - return s; - } - } - /** - * required string value = 1; - */ - public com.google.protobuf.ByteString - getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - value_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required bool inclusive = 2; - public static final int INCLUSIVE_FIELD_NUMBER = 2; - private boolean inclusive_; - /** - * required bool inclusive = 2; - */ - public boolean hasInclusive() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required bool inclusive = 2; - */ - public boolean getInclusive() { - return inclusive_; - } - - private void initFields() { - value_ = ""; - inclusive_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasValue()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasInclusive()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getValueBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(2, inclusive_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getValueBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(2, inclusive_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() 
- throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - value_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - inclusive_ = false; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.value_ = value_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.inclusive_ = inclusive_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder 
mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance()) return this; - if (other.hasValue()) { - bitField0_ |= 0x00000001; - value_ = other.value_; - onChanged(); - } - if (other.hasInclusive()) { - setInclusive(other.getInclusive()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasValue()) { - - return false; - } - if (!hasInclusive()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string value = 1; - private java.lang.Object value_ = ""; - /** - * required string value = 1; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string value = 1; - */ - public java.lang.String getValue() { - java.lang.Object ref = value_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - value_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string value = 1; - */ - public com.google.protobuf.ByteString - getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - value_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string value = 1; - */ - public Builder setValue( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - value_ = value; - onChanged(); - return this; - } - /** - * required string value = 1; - */ - public Builder clearValue() { - bitField0_ = (bitField0_ & ~0x00000001); - value_ = getDefaultInstance().getValue(); - onChanged(); - return this; - } - /** - * required string value = 1; - */ - public Builder setValueBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - value_ = value; - onChanged(); - return this; - } - - // required bool inclusive = 2; - private boolean inclusive_ ; - /** - * required bool inclusive = 2; - */ - public boolean hasInclusive() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required bool inclusive = 2; - */ - public boolean getInclusive() { - return inclusive_; - } - /** - * required bool inclusive = 2; - */ - public Builder setInclusive(boolean value) { - bitField0_ |= 0x00000002; - inclusive_ = value; - onChanged(); - return this; - } - /** - * required bool inclusive = 2; - */ - public Builder clearInclusive() { - bitField0_ = (bitField0_ & ~0x00000002); - inclusive_ = false; - onChanged(); - 
return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark) - } - - static { - defaultInstance = new Mark(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark) - } - - public interface RangeOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string key = 1; - /** - * required string key = 1; - */ - boolean hasKey(); - /** - * required string key = 1; - */ - java.lang.String getKey(); - /** - * required string key = 1; - */ - com.google.protobuf.ByteString - getKeyBytes(); - - // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - boolean hasStart(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getStart(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getStartOrBuilder(); - - // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - boolean hasEnd(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getEnd(); - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getEndOrBuilder(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range} - */ - public static final class Range extends - com.google.protobuf.GeneratedMessage - implements RangeOrBuilder { - // Use Range.newBuilder() to construct. 
- private Range(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Range(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Range defaultInstance; - public static Range getDefaultInstance() { - return defaultInstance; - } - - public Range getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Range( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - key_ = input.readBytes(); - break; - } - case 18: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - subBuilder = start_.toBuilder(); - } - start_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(start_); - start_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000002; - break; - } - case 26: { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder subBuilder = null; - if (((bitField0_ & 0x00000004) == 0x00000004)) { - subBuilder = end_.toBuilder(); - } - end_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(end_); - end_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000004; - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new 
com.google.protobuf.AbstractParser() { - public Range parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Range(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string key = 1; - public static final int KEY_FIELD_NUMBER = 1; - private java.lang.Object key_; - /** - * required string key = 1; - */ - public boolean hasKey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string key = 1; - */ - public java.lang.String getKey() { - java.lang.Object ref = key_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - key_ = s; - } - return s; - } - } - /** - * required string key = 1; - */ - public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - key_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - public static final int START_FIELD_NUMBER = 2; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark start_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - public boolean hasStart() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getStart() { - return start_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getStartOrBuilder() { - return start_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - public static final int END_FIELD_NUMBER = 3; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark end_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - public boolean hasEnd() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getEnd() { - return end_; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getEndOrBuilder() { - return end_; - } - - private void initFields() { - key_ = ""; - start_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); - end_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); - } - private byte 
memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasKey()) { - memoizedIsInitialized = 0; - return false; - } - if (hasStart()) { - if (!getStart().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - if (hasEnd()) { - if (!getEnd().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getKeyBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, start_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeMessage(3, end_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getKeyBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, start_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, end_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range 
parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getStartFieldBuilder(); - getEndFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - key_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - if (startBuilder_ == null) { - start_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); - } else { - startBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - if (endBuilder_ == null) { 
- end_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); - } else { - endBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.key_ = key_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (startBuilder_ == null) { - result.start_ = start_; - } else { - result.start_ = startBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - if (endBuilder_ == null) { - result.end_ = end_; - } else { - result.end_ = endBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.getDefaultInstance()) return this; - if (other.hasKey()) { - bitField0_ |= 0x00000001; - key_ = other.key_; - onChanged(); - } - if (other.hasStart()) { - mergeStart(other.getStart()); - } - if (other.hasEnd()) { - mergeEnd(other.getEnd()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasKey()) { - - return false; - } - if (hasStart()) { - if (!getStart().isInitialized()) { - - return false; - } - } - if (hasEnd()) { - if (!getEnd().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parsedMessage = null; - try { - 
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string key = 1; - private java.lang.Object key_ = ""; - /** - * required string key = 1; - */ - public boolean hasKey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string key = 1; - */ - public java.lang.String getKey() { - java.lang.Object ref = key_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - key_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string key = 1; - */ - public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - key_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string key = 1; - */ - public Builder setKey( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - key_ = value; - onChanged(); - return this; - } - /** - * required string key = 1; - */ - public Builder clearKey() { - bitField0_ = (bitField0_ & ~0x00000001); - key_ = getDefaultInstance().getKey(); - onChanged(); - return this; - } - /** - * required string key = 1; - */ - public Builder setKeyBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - key_ = value; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark start_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder> startBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - public boolean hasStart() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getStart() { - if (startBuilder_ == null) { - return start_; - } else { - return startBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - public Builder setStart(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark value) { - if (startBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - start_ = value; - onChanged(); - } else { - startBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } 
- /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - public Builder setStart( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder builderForValue) { - if (startBuilder_ == null) { - start_ = builderForValue.build(); - onChanged(); - } else { - startBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - public Builder mergeStart(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark value) { - if (startBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - start_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance()) { - start_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.newBuilder(start_).mergeFrom(value).buildPartial(); - } else { - start_ = value; - } - onChanged(); - } else { - startBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - public Builder clearStart() { - if (startBuilder_ == null) { - start_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); - onChanged(); - } else { - startBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder getStartBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getStartFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getStartOrBuilder() { - if (startBuilder_ != null) { - return startBuilder_.getMessageOrBuilder(); - } else { - return start_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder> - getStartFieldBuilder() { - if (startBuilder_ == null) { - startBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder>( - start_, - getParentForChildren(), - isClean()); - start_ = null; - } - return startBuilder_; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark end_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder> endBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - public boolean hasEnd() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getEnd() { - if (endBuilder_ == null) { - return end_; - } else { - return endBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - public Builder setEnd(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark value) { - if (endBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - end_ = value; - onChanged(); - } else { - endBuilder_.setMessage(value); - } - bitField0_ |= 0x00000004; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - public Builder setEnd( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder builderForValue) { - if (endBuilder_ == null) { - end_ = builderForValue.build(); - onChanged(); - } else { - endBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000004; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - public Builder mergeEnd(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark value) { - if (endBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004) && - end_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance()) { - end_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.newBuilder(end_).mergeFrom(value).buildPartial(); - } else { - end_ = value; - } - onChanged(); - } else { - endBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000004; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - public Builder clearEnd() { - if (endBuilder_ == null) { - end_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); - onChanged(); - } else { - endBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder getEndBuilder() { - bitField0_ |= 0x00000004; - onChanged(); - return getEndFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getEndOrBuilder() { - if (endBuilder_ != null) { - return endBuilder_.getMessageOrBuilder(); - } else { - return end_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; - */ - private 
com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder> - getEndFieldBuilder() { - if (endBuilder_ == null) { - endBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder>( - end_, - getParentForChildren(), - isClean()); - end_ = null; - } - return endBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range) - } - - static { - defaultInstance = new Range(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range) - } - - public interface OperatorOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; - /** - * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; - */ - boolean hasType(); - /** - * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type getType(); - - // required string key = 2; - /** - * required string key = 2; - */ - boolean hasKey(); - /** - * required string key = 2; - */ - java.lang.String getKey(); - /** - * required string key = 2; - */ - com.google.protobuf.ByteString - getKeyBytes(); - - // required string val = 3; - /** - * required string val = 3; - */ - boolean hasVal(); - /** - * required string val = 3; - */ - java.lang.String getVal(); - /** - * required string val = 3; - */ - com.google.protobuf.ByteString - getValBytes(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator} - */ - public static final class Operator extends - com.google.protobuf.GeneratedMessage - implements OperatorOrBuilder { - // Use Operator.newBuilder() to construct. 
- private Operator(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Operator(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Operator defaultInstance; - public static Operator getDefaultInstance() { - return defaultInstance; - } - - public Operator getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Operator( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - type_ = value; - } - break; - } - case 18: { - bitField0_ |= 0x00000002; - key_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - val_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Operator parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Operator(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - /** - * Protobuf enum {@code 
org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type} - */ - public enum Type - implements com.google.protobuf.ProtocolMessageEnum { - /** - * LIKE = 0; - */ - LIKE(0, 0), - /** - * NOTEQUALS = 1; - */ - NOTEQUALS(1, 1), - ; - - /** - * LIKE = 0; - */ - public static final int LIKE_VALUE = 0; - /** - * NOTEQUALS = 1; - */ - public static final int NOTEQUALS_VALUE = 1; - - - public final int getNumber() { return value; } - - public static Type valueOf(int value) { - switch (value) { - case 0: return LIKE; - case 1: return NOTEQUALS; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public Type findValueByNumber(int number) { - return Type.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDescriptor().getEnumTypes().get(0); - } - - private static final Type[] VALUES = values(); - - public static Type valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private Type(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type) - } - - private int bitField0_; - // required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; - public static final int TYPE_FIELD_NUMBER = 1; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type type_; - /** - * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type getType() { - return type_; - } - - // required string key = 2; - public static final int KEY_FIELD_NUMBER = 2; - private java.lang.Object key_; - /** - * required string key = 2; - */ - public boolean hasKey() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string key = 2; - */ - public java.lang.String getKey() { - java.lang.Object ref = key_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - key_ = s; - } - return s; - } - } - /** - * required string key = 2; - */ - public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; - if (ref instanceof java.lang.String) { - 
com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - key_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string val = 3; - public static final int VAL_FIELD_NUMBER = 3; - private java.lang.Object val_; - /** - * required string val = 3; - */ - public boolean hasVal() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required string val = 3; - */ - public java.lang.String getVal() { - java.lang.Object ref = val_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - val_ = s; - } - return s; - } - } - /** - * required string val = 3; - */ - public com.google.protobuf.ByteString - getValBytes() { - java.lang.Object ref = val_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - val_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - type_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.LIKE; - key_ = ""; - val_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasType()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasKey()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasVal()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, type_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getKeyBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getValBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, type_.getNumber()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getKeyBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getValBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom( - com.google.protobuf.ByteString data, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - 
internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - type_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.LIKE; - bitField0_ = (bitField0_ & ~0x00000001); - key_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - val_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.type_ = type_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.key_ = key_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.val_ = val_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator other) { - if (other == 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDefaultInstance()) return this; - if (other.hasType()) { - setType(other.getType()); - } - if (other.hasKey()) { - bitField0_ |= 0x00000002; - key_ = other.key_; - onChanged(); - } - if (other.hasVal()) { - bitField0_ |= 0x00000004; - val_ = other.val_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasType()) { - - return false; - } - if (!hasKey()) { - - return false; - } - if (!hasVal()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type type_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.LIKE; - /** - * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type getType() { - return type_; - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; - */ - public Builder setType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - type_ = value; - onChanged(); - return this; - } - /** - * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; - */ - public Builder clearType() { - bitField0_ = (bitField0_ & ~0x00000001); - type_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.LIKE; - onChanged(); - return this; - } - - // required string key = 2; - private java.lang.Object key_ = ""; - /** - * required string key = 2; - */ - public boolean hasKey() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string key = 2; - */ - public java.lang.String getKey() { - java.lang.Object ref = key_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - key_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string key = 2; - */ - public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - 
(java.lang.String) ref); - key_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string key = 2; - */ - public Builder setKey( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - key_ = value; - onChanged(); - return this; - } - /** - * required string key = 2; - */ - public Builder clearKey() { - bitField0_ = (bitField0_ & ~0x00000002); - key_ = getDefaultInstance().getKey(); - onChanged(); - return this; - } - /** - * required string key = 2; - */ - public Builder setKeyBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - key_ = value; - onChanged(); - return this; - } - - // required string val = 3; - private java.lang.Object val_ = ""; - /** - * required string val = 3; - */ - public boolean hasVal() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required string val = 3; - */ - public java.lang.String getVal() { - java.lang.Object ref = val_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - val_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string val = 3; - */ - public com.google.protobuf.ByteString - getValBytes() { - java.lang.Object ref = val_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - val_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string val = 3; - */ - public Builder setVal( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - val_ = value; - onChanged(); - return this; - } - /** - * required string val = 3; - */ - public Builder clearVal() { - bitField0_ = (bitField0_ & ~0x00000004); - val_ = getDefaultInstance().getVal(); - onChanged(); - return this; - } - /** - * required string val = 3; - */ - public Builder setValBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - val_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator) - } - - static { - defaultInstance = new Operator(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator) - } - - private int bitField0_; - // required string names = 1; - public static final int NAMES_FIELD_NUMBER = 1; - private java.lang.Object names_; - /** - * required string names = 1; - */ - public boolean hasNames() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string names = 1; - */ - public java.lang.String getNames() { - java.lang.Object ref = names_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - names_ = s; - } - return s; - } - } - /** - * required string names = 1; - */ - public com.google.protobuf.ByteString - getNamesBytes() { - java.lang.Object ref = names_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - 
com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - names_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string types = 2; - public static final int TYPES_FIELD_NUMBER = 2; - private java.lang.Object types_; - /** - * required string types = 2; - */ - public boolean hasTypes() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string types = 2; - */ - public java.lang.String getTypes() { - java.lang.Object ref = types_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - types_ = s; - } - return s; - } - } - /** - * required string types = 2; - */ - public com.google.protobuf.ByteString - getTypesBytes() { - java.lang.Object ref = types_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - types_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - public static final int OP_FIELD_NUMBER = 3; - private java.util.List op_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public java.util.List getOpList() { - return op_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public java.util.List - getOpOrBuilderList() { - return op_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public int getOpCount() { - return op_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator getOp(int index) { - return op_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder getOpOrBuilder( - int index) { - return op_.get(index); - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - public static final int RANGE_FIELD_NUMBER = 4; - private java.util.List range_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public java.util.List getRangeList() { - return range_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public java.util.List - getRangeOrBuilderList() { - return range_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public int getRangeCount() { - return range_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range getRange(int index) { - return range_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder getRangeOrBuilder( - int index) { - return 
range_.get(index); - } - - private void initFields() { - names_ = ""; - types_ = ""; - op_ = java.util.Collections.emptyList(); - range_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasNames()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasTypes()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getOpCount(); i++) { - if (!getOp(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getRangeCount(); i++) { - if (!getRange(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getNamesBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getTypesBytes()); - } - for (int i = 0; i < op_.size(); i++) { - output.writeMessage(3, op_.get(i)); - } - for (int i = 0; i < range_.size(); i++) { - output.writeMessage(4, range_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getNamesBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getTypesBytes()); - } - for (int i = 0; i < op_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, op_.get(i)); - } - for (int i = 0; i < range_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, range_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparatorOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getOpFieldBuilder(); - getRangeFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - names_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - types_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - if (opBuilder_ == null) { - op_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - } else { - opBuilder_.clear(); - } - if (rangeBuilder_ == null) { - range_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - } else { - rangeBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.names_ = names_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.types_ = types_; - if (opBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004)) { - op_ = java.util.Collections.unmodifiableList(op_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.op_ = op_; - } else { - result.op_ = opBuilder_.build(); - } - if (rangeBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008)) { - range_ = java.util.Collections.unmodifiableList(range_); - bitField0_ = (bitField0_ & ~0x00000008); - } - result.range_ = range_; - } else { - result.range_ = rangeBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.getDefaultInstance()) return this; - if (other.hasNames()) { - bitField0_ |= 0x00000001; - names_ = other.names_; - onChanged(); - } - if (other.hasTypes()) { - bitField0_ |= 0x00000002; - types_ = other.types_; - onChanged(); - } - if (opBuilder_ == 
null) { - if (!other.op_.isEmpty()) { - if (op_.isEmpty()) { - op_ = other.op_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureOpIsMutable(); - op_.addAll(other.op_); - } - onChanged(); - } - } else { - if (!other.op_.isEmpty()) { - if (opBuilder_.isEmpty()) { - opBuilder_.dispose(); - opBuilder_ = null; - op_ = other.op_; - bitField0_ = (bitField0_ & ~0x00000004); - opBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getOpFieldBuilder() : null; - } else { - opBuilder_.addAllMessages(other.op_); - } - } - } - if (rangeBuilder_ == null) { - if (!other.range_.isEmpty()) { - if (range_.isEmpty()) { - range_ = other.range_; - bitField0_ = (bitField0_ & ~0x00000008); - } else { - ensureRangeIsMutable(); - range_.addAll(other.range_); - } - onChanged(); - } - } else { - if (!other.range_.isEmpty()) { - if (rangeBuilder_.isEmpty()) { - rangeBuilder_.dispose(); - rangeBuilder_ = null; - range_ = other.range_; - bitField0_ = (bitField0_ & ~0x00000008); - rangeBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getRangeFieldBuilder() : null; - } else { - rangeBuilder_.addAllMessages(other.range_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasNames()) { - - return false; - } - if (!hasTypes()) { - - return false; - } - for (int i = 0; i < getOpCount(); i++) { - if (!getOp(i).isInitialized()) { - - return false; - } - } - for (int i = 0; i < getRangeCount(); i++) { - if (!getRange(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string names = 1; - private java.lang.Object names_ = ""; - /** - * required string names = 1; - */ - public boolean hasNames() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string names = 1; - */ - public java.lang.String getNames() { - java.lang.Object ref = names_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - names_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string names = 1; - */ - public com.google.protobuf.ByteString - getNamesBytes() { - java.lang.Object ref = names_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - names_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string names = 1; - */ - public Builder setNames( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - names_ = value; - onChanged(); - return this; - } - /** - * required string names = 1; - */ - public Builder clearNames() { - bitField0_ = (bitField0_ & ~0x00000001); - 
names_ = getDefaultInstance().getNames(); - onChanged(); - return this; - } - /** - * required string names = 1; - */ - public Builder setNamesBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - names_ = value; - onChanged(); - return this; - } - - // required string types = 2; - private java.lang.Object types_ = ""; - /** - * required string types = 2; - */ - public boolean hasTypes() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string types = 2; - */ - public java.lang.String getTypes() { - java.lang.Object ref = types_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - types_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string types = 2; - */ - public com.google.protobuf.ByteString - getTypesBytes() { - java.lang.Object ref = types_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - types_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string types = 2; - */ - public Builder setTypes( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - types_ = value; - onChanged(); - return this; - } - /** - * required string types = 2; - */ - public Builder clearTypes() { - bitField0_ = (bitField0_ & ~0x00000002); - types_ = getDefaultInstance().getTypes(); - onChanged(); - return this; - } - /** - * required string types = 2; - */ - public Builder setTypesBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - types_ = value; - onChanged(); - return this; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - private java.util.List op_ = - java.util.Collections.emptyList(); - private void ensureOpIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - op_ = new java.util.ArrayList(op_); - bitField0_ |= 0x00000004; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder> opBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public java.util.List getOpList() { - if (opBuilder_ == null) { - return java.util.Collections.unmodifiableList(op_); - } else { - return opBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public int getOpCount() { - if (opBuilder_ == null) { - return op_.size(); - } else { - return opBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator getOp(int index) { - if (opBuilder_ == null) { - return op_.get(index); - } else { - return opBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - 
*/ - public Builder setOp( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator value) { - if (opBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureOpIsMutable(); - op_.set(index, value); - onChanged(); - } else { - opBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public Builder setOp( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder builderForValue) { - if (opBuilder_ == null) { - ensureOpIsMutable(); - op_.set(index, builderForValue.build()); - onChanged(); - } else { - opBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public Builder addOp(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator value) { - if (opBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureOpIsMutable(); - op_.add(value); - onChanged(); - } else { - opBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public Builder addOp( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator value) { - if (opBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureOpIsMutable(); - op_.add(index, value); - onChanged(); - } else { - opBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public Builder addOp( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder builderForValue) { - if (opBuilder_ == null) { - ensureOpIsMutable(); - op_.add(builderForValue.build()); - onChanged(); - } else { - opBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public Builder addOp( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder builderForValue) { - if (opBuilder_ == null) { - ensureOpIsMutable(); - op_.add(index, builderForValue.build()); - onChanged(); - } else { - opBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public Builder addAllOp( - java.lang.Iterable values) { - if (opBuilder_ == null) { - ensureOpIsMutable(); - super.addAll(values, op_); - onChanged(); - } else { - opBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public Builder clearOp() { - if (opBuilder_ == null) { - op_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - } else { - opBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public Builder removeOp(int index) { - if (opBuilder_ == null) { - ensureOpIsMutable(); - op_.remove(index); - onChanged(); - } else { - opBuilder_.remove(index); - } - return 
this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder getOpBuilder( - int index) { - return getOpFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder getOpOrBuilder( - int index) { - if (opBuilder_ == null) { - return op_.get(index); } else { - return opBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public java.util.List - getOpOrBuilderList() { - if (opBuilder_ != null) { - return opBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(op_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder addOpBuilder() { - return getOpFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder addOpBuilder( - int index) { - return getOpFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; - */ - public java.util.List - getOpBuilderList() { - return getOpFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder> - getOpFieldBuilder() { - if (opBuilder_ == null) { - opBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder>( - op_, - ((bitField0_ & 0x00000004) == 0x00000004), - getParentForChildren(), - isClean()); - op_ = null; - } - return opBuilder_; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - private java.util.List range_ = - java.util.Collections.emptyList(); - private void ensureRangeIsMutable() { - if (!((bitField0_ & 0x00000008) == 0x00000008)) { - range_ = new java.util.ArrayList(range_); - bitField0_ |= 0x00000008; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder> rangeBuilder_; - 
- /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public java.util.List getRangeList() { - if (rangeBuilder_ == null) { - return java.util.Collections.unmodifiableList(range_); - } else { - return rangeBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public int getRangeCount() { - if (rangeBuilder_ == null) { - return range_.size(); - } else { - return rangeBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range getRange(int index) { - if (rangeBuilder_ == null) { - return range_.get(index); - } else { - return rangeBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public Builder setRange( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range value) { - if (rangeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRangeIsMutable(); - range_.set(index, value); - onChanged(); - } else { - rangeBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public Builder setRange( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder builderForValue) { - if (rangeBuilder_ == null) { - ensureRangeIsMutable(); - range_.set(index, builderForValue.build()); - onChanged(); - } else { - rangeBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public Builder addRange(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range value) { - if (rangeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRangeIsMutable(); - range_.add(value); - onChanged(); - } else { - rangeBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public Builder addRange( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range value) { - if (rangeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRangeIsMutable(); - range_.add(index, value); - onChanged(); - } else { - rangeBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public Builder addRange( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder builderForValue) { - if (rangeBuilder_ == null) { - ensureRangeIsMutable(); - range_.add(builderForValue.build()); - onChanged(); - } else { - rangeBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public Builder addRange( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder builderForValue) { - if (rangeBuilder_ == null) { - ensureRangeIsMutable(); - range_.add(index, 
builderForValue.build()); - onChanged(); - } else { - rangeBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public Builder addAllRange( - java.lang.Iterable values) { - if (rangeBuilder_ == null) { - ensureRangeIsMutable(); - super.addAll(values, range_); - onChanged(); - } else { - rangeBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public Builder clearRange() { - if (rangeBuilder_ == null) { - range_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - onChanged(); - } else { - rangeBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public Builder removeRange(int index) { - if (rangeBuilder_ == null) { - ensureRangeIsMutable(); - range_.remove(index); - onChanged(); - } else { - rangeBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder getRangeBuilder( - int index) { - return getRangeFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder getRangeOrBuilder( - int index) { - if (rangeBuilder_ == null) { - return range_.get(index); } else { - return rangeBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public java.util.List - getRangeOrBuilderList() { - if (rangeBuilder_ != null) { - return rangeBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(range_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder addRangeBuilder() { - return getRangeFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder addRangeBuilder( - int index) { - return getRangeFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; - */ - public java.util.List - getRangeBuilderList() { - return getRangeFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder> - getRangeFieldBuilder() { - if (rangeBuilder_ == null) { - rangeBuilder_ = new 
com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder>( - range_, - ((bitField0_ & 0x00000008) == 0x00000008), - getParentForChildren(), - isClean()); - range_ = null; - } - return rangeBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator) - } - - static { - defaultInstance = new PartitionKeyComparator(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator) - } - - public interface PrimaryKeyOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string pk_name = 1; - /** - * required string pk_name = 1; - */ - boolean hasPkName(); - /** - * required string pk_name = 1; - */ - java.lang.String getPkName(); - /** - * required string pk_name = 1; - */ - com.google.protobuf.ByteString - getPkNameBytes(); - - // repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - java.util.List - getColsList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn getCols(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - int getColsCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - java.util.List - getColsOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumnOrBuilder getColsOrBuilder( - int index); - - // optional bool enable_constraint = 3; - /** - * optional bool enable_constraint = 3; - */ - boolean hasEnableConstraint(); - /** - * optional bool enable_constraint = 3; - */ - boolean getEnableConstraint(); - - // optional bool validate_constraint = 4; - /** - * optional bool validate_constraint = 4; - */ - boolean hasValidateConstraint(); - /** - * optional bool validate_constraint = 4; - */ - boolean getValidateConstraint(); - - // optional bool rely_constraint = 5; - /** - * optional bool rely_constraint = 5; - */ - boolean hasRelyConstraint(); - /** - * optional bool rely_constraint = 5; - */ - boolean getRelyConstraint(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrimaryKey} - */ - public static final class PrimaryKey extends - com.google.protobuf.GeneratedMessage - implements PrimaryKeyOrBuilder { - // Use PrimaryKey.newBuilder() to construct. 
- private PrimaryKey(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private PrimaryKey(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final PrimaryKey defaultInstance; - public static PrimaryKey getDefaultInstance() { - return defaultInstance; - } - - public PrimaryKey getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private PrimaryKey( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - pkName_ = input.readBytes(); - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - cols_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - cols_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.PARSER, extensionRegistry)); - break; - } - case 24: { - bitField0_ |= 0x00000002; - enableConstraint_ = input.readBool(); - break; - } - case 32: { - bitField0_ |= 0x00000004; - validateConstraint_ = input.readBool(); - break; - } - case 40: { - bitField0_ |= 0x00000008; - relyConstraint_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - cols_ = java.util.Collections.unmodifiableList(cols_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public PrimaryKey parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new PrimaryKey(input, extensionRegistry); - } - }; - - @java.lang.Override - public 
com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface PrimaryKeyColumnOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string column_name = 1; - /** - * required string column_name = 1; - */ - boolean hasColumnName(); - /** - * required string column_name = 1; - */ - java.lang.String getColumnName(); - /** - * required string column_name = 1; - */ - com.google.protobuf.ByteString - getColumnNameBytes(); - - // required sint32 key_seq = 2; - /** - * required sint32 key_seq = 2; - */ - boolean hasKeySeq(); - /** - * required sint32 key_seq = 2; - */ - int getKeySeq(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn} - */ - public static final class PrimaryKeyColumn extends - com.google.protobuf.GeneratedMessage - implements PrimaryKeyColumnOrBuilder { - // Use PrimaryKeyColumn.newBuilder() to construct. - private PrimaryKeyColumn(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private PrimaryKeyColumn(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final PrimaryKeyColumn defaultInstance; - public static PrimaryKeyColumn getDefaultInstance() { - return defaultInstance; - } - - public PrimaryKeyColumn getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private PrimaryKeyColumn( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - columnName_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - keySeq_ = input.readSInt32(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_PrimaryKeyColumn_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_PrimaryKeyColumn_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.Builder.class); - } - - 
public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public PrimaryKeyColumn parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new PrimaryKeyColumn(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string column_name = 1; - public static final int COLUMN_NAME_FIELD_NUMBER = 1; - private java.lang.Object columnName_; - /** - * required string column_name = 1; - */ - public boolean hasColumnName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string column_name = 1; - */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - columnName_ = s; - } - return s; - } - } - /** - * required string column_name = 1; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required sint32 key_seq = 2; - public static final int KEY_SEQ_FIELD_NUMBER = 2; - private int keySeq_; - /** - * required sint32 key_seq = 2; - */ - public boolean hasKeySeq() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required sint32 key_seq = 2; - */ - public int getKeySeq() { - return keySeq_; - } - - private void initFields() { - columnName_ = ""; - keySeq_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasColumnName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasKeySeq()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getColumnNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeSInt32(2, keySeq_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getColumnNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeSInt32Size(2, keySeq_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumnOrBuilder { - 
public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_PrimaryKeyColumn_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_PrimaryKeyColumn_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - columnName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - keySeq_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_PrimaryKeyColumn_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.columnName_ = columnName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.keySeq_ = keySeq_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn other) { - if 
(other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.getDefaultInstance()) return this; - if (other.hasColumnName()) { - bitField0_ |= 0x00000001; - columnName_ = other.columnName_; - onChanged(); - } - if (other.hasKeySeq()) { - setKeySeq(other.getKeySeq()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasColumnName()) { - - return false; - } - if (!hasKeySeq()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string column_name = 1; - private java.lang.Object columnName_ = ""; - /** - * required string column_name = 1; - */ - public boolean hasColumnName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string column_name = 1; - */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - columnName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string column_name = 1; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string column_name = 1; - */ - public Builder setColumnName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - columnName_ = value; - onChanged(); - return this; - } - /** - * required string column_name = 1; - */ - public Builder clearColumnName() { - bitField0_ = (bitField0_ & ~0x00000001); - columnName_ = getDefaultInstance().getColumnName(); - onChanged(); - return this; - } - /** - * required string column_name = 1; - */ - public Builder setColumnNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - columnName_ = value; - onChanged(); - return this; - } - - // required sint32 key_seq = 2; - private int keySeq_ ; - /** - * required sint32 key_seq = 2; - */ - public boolean hasKeySeq() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required sint32 key_seq = 2; - */ - public int getKeySeq() { - return keySeq_; - } - /** - * required sint32 key_seq = 2; - */ - public Builder setKeySeq(int value) { - bitField0_ |= 0x00000002; - keySeq_ = value; - onChanged(); - return this; - } - /** - * required sint32 key_seq = 2; - */ - public Builder clearKeySeq() { - bitField0_ = (bitField0_ & ~0x00000002); - keySeq_ = 0; - onChanged(); - return this; - } - - // 
@@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn) - } - - static { - defaultInstance = new PrimaryKeyColumn(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn) - } - - private int bitField0_; - // required string pk_name = 1; - public static final int PK_NAME_FIELD_NUMBER = 1; - private java.lang.Object pkName_; - /** - * required string pk_name = 1; - */ - public boolean hasPkName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string pk_name = 1; - */ - public java.lang.String getPkName() { - java.lang.Object ref = pkName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - pkName_ = s; - } - return s; - } - } - /** - * required string pk_name = 1; - */ - public com.google.protobuf.ByteString - getPkNameBytes() { - java.lang.Object ref = pkName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - pkName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - public static final int COLS_FIELD_NUMBER = 2; - private java.util.List cols_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public java.util.List getColsList() { - return cols_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public java.util.List - getColsOrBuilderList() { - return cols_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public int getColsCount() { - return cols_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn getCols(int index) { - return cols_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumnOrBuilder getColsOrBuilder( - int index) { - return cols_.get(index); - } - - // optional bool enable_constraint = 3; - public static final int ENABLE_CONSTRAINT_FIELD_NUMBER = 3; - private boolean enableConstraint_; - /** - * optional bool enable_constraint = 3; - */ - public boolean hasEnableConstraint() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bool enable_constraint = 3; - */ - public boolean getEnableConstraint() { - return enableConstraint_; - } - - // optional bool validate_constraint = 4; - public static final int VALIDATE_CONSTRAINT_FIELD_NUMBER = 4; - private boolean validateConstraint_; - /** - * optional bool validate_constraint = 4; - */ - public boolean hasValidateConstraint() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional bool validate_constraint = 4; - */ - public boolean getValidateConstraint() { - return validateConstraint_; - } - - // optional bool rely_constraint = 5; - public static final int RELY_CONSTRAINT_FIELD_NUMBER = 5; - private boolean relyConstraint_; 
- /** - * optional bool rely_constraint = 5; - */ - public boolean hasRelyConstraint() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bool rely_constraint = 5; - */ - public boolean getRelyConstraint() { - return relyConstraint_; - } - - private void initFields() { - pkName_ = ""; - cols_ = java.util.Collections.emptyList(); - enableConstraint_ = false; - validateConstraint_ = false; - relyConstraint_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasPkName()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getColsCount(); i++) { - if (!getCols(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getPkNameBytes()); - } - for (int i = 0; i < cols_.size(); i++) { - output.writeMessage(2, cols_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(3, enableConstraint_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(4, validateConstraint_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBool(5, relyConstraint_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getPkNameBytes()); - } - for (int i = 0; i < cols_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, cols_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(3, enableConstraint_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(4, validateConstraint_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, relyConstraint_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey 
parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrimaryKey} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKeyOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void 
maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getColsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - pkName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - if (colsBuilder_ == null) { - cols_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - colsBuilder_.clear(); - } - enableConstraint_ = false; - bitField0_ = (bitField0_ & ~0x00000004); - validateConstraint_ = false; - bitField0_ = (bitField0_ & ~0x00000008); - relyConstraint_ = false; - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.pkName_ = pkName_; - if (colsBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - cols_ = java.util.Collections.unmodifiableList(cols_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.cols_ = cols_; - } else { - result.cols_ = colsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000002; - } - result.enableConstraint_ = enableConstraint_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000004; - } - result.validateConstraint_ = validateConstraint_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000008; - } - result.relyConstraint_ = relyConstraint_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.getDefaultInstance()) return this; - if (other.hasPkName()) { - bitField0_ |= 0x00000001; - pkName_ = other.pkName_; - onChanged(); - } - if (colsBuilder_ == null) { - if (!other.cols_.isEmpty()) { - if (cols_.isEmpty()) { - cols_ = other.cols_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureColsIsMutable(); - 
cols_.addAll(other.cols_); - } - onChanged(); - } - } else { - if (!other.cols_.isEmpty()) { - if (colsBuilder_.isEmpty()) { - colsBuilder_.dispose(); - colsBuilder_ = null; - cols_ = other.cols_; - bitField0_ = (bitField0_ & ~0x00000002); - colsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getColsFieldBuilder() : null; - } else { - colsBuilder_.addAllMessages(other.cols_); - } - } - } - if (other.hasEnableConstraint()) { - setEnableConstraint(other.getEnableConstraint()); - } - if (other.hasValidateConstraint()) { - setValidateConstraint(other.getValidateConstraint()); - } - if (other.hasRelyConstraint()) { - setRelyConstraint(other.getRelyConstraint()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasPkName()) { - - return false; - } - for (int i = 0; i < getColsCount(); i++) { - if (!getCols(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string pk_name = 1; - private java.lang.Object pkName_ = ""; - /** - * required string pk_name = 1; - */ - public boolean hasPkName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string pk_name = 1; - */ - public java.lang.String getPkName() { - java.lang.Object ref = pkName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - pkName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string pk_name = 1; - */ - public com.google.protobuf.ByteString - getPkNameBytes() { - java.lang.Object ref = pkName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - pkName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string pk_name = 1; - */ - public Builder setPkName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - pkName_ = value; - onChanged(); - return this; - } - /** - * required string pk_name = 1; - */ - public Builder clearPkName() { - bitField0_ = (bitField0_ & ~0x00000001); - pkName_ = getDefaultInstance().getPkName(); - onChanged(); - return this; - } - /** - * required string pk_name = 1; - */ - public Builder setPkNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - pkName_ = value; - onChanged(); - return this; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - private java.util.List cols_ = - java.util.Collections.emptyList(); - private void ensureColsIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - cols_ = new java.util.ArrayList(cols_); 
- bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumnOrBuilder> colsBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public java.util.List getColsList() { - if (colsBuilder_ == null) { - return java.util.Collections.unmodifiableList(cols_); - } else { - return colsBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public int getColsCount() { - if (colsBuilder_ == null) { - return cols_.size(); - } else { - return colsBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn getCols(int index) { - if (colsBuilder_ == null) { - return cols_.get(index); - } else { - return colsBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public Builder setCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.set(index, value); - onChanged(); - } else { - colsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public Builder setCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.set(index, builderForValue.build()); - onChanged(); - } else { - colsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public Builder addCols(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.add(value); - onChanged(); - } else { - colsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public Builder addCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.add(index, value); - onChanged(); - } else { - colsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public Builder addCols( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.add(builderForValue.build()); - onChanged(); - } else { - colsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * 
repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public Builder addCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.add(index, builderForValue.build()); - onChanged(); - } else { - colsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public Builder addAllCols( - java.lang.Iterable values) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - super.addAll(values, cols_); - onChanged(); - } else { - colsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public Builder clearCols() { - if (colsBuilder_ == null) { - cols_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - colsBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public Builder removeCols(int index) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.remove(index); - onChanged(); - } else { - colsBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.Builder getColsBuilder( - int index) { - return getColsFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumnOrBuilder getColsOrBuilder( - int index) { - if (colsBuilder_ == null) { - return cols_.get(index); } else { - return colsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public java.util.List - getColsOrBuilderList() { - if (colsBuilder_ != null) { - return colsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(cols_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.Builder addColsBuilder() { - return getColsFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.Builder addColsBuilder( - int index) { - return getColsFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.PrimaryKey.PrimaryKeyColumn cols = 2; - */ - public java.util.List - getColsBuilderList() { - return getColsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumnOrBuilder> - getColsFieldBuilder() { - if (colsBuilder_ == null) { - colsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumnOrBuilder>( - cols_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - cols_ = null; - } - return colsBuilder_; - } - - // optional bool enable_constraint = 3; - private boolean enableConstraint_ ; - /** - * optional bool enable_constraint = 3; - */ - public boolean hasEnableConstraint() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional bool enable_constraint = 3; - */ - public boolean getEnableConstraint() { - return enableConstraint_; - } - /** - * optional bool enable_constraint = 3; - */ - public Builder setEnableConstraint(boolean value) { - bitField0_ |= 0x00000004; - enableConstraint_ = value; - onChanged(); - return this; - } - /** - * optional bool enable_constraint = 3; - */ - public Builder clearEnableConstraint() { - bitField0_ = (bitField0_ & ~0x00000004); - enableConstraint_ = false; - onChanged(); - return this; - } - - // optional bool validate_constraint = 4; - private boolean validateConstraint_ ; - /** - * optional bool validate_constraint = 4; - */ - public boolean hasValidateConstraint() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bool validate_constraint = 4; - */ - public boolean getValidateConstraint() { - return validateConstraint_; - } - /** - * optional bool validate_constraint = 4; - */ - public Builder setValidateConstraint(boolean value) { - bitField0_ |= 0x00000008; - validateConstraint_ = value; - onChanged(); - return this; - } - /** - * optional bool validate_constraint = 4; - */ - public Builder clearValidateConstraint() { - bitField0_ = (bitField0_ & ~0x00000008); - validateConstraint_ = false; - onChanged(); - return this; - } - - // optional bool rely_constraint = 5; - private boolean relyConstraint_ ; - /** - * optional bool rely_constraint = 5; - */ - public boolean hasRelyConstraint() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool rely_constraint = 5; - */ - public boolean getRelyConstraint() { - return relyConstraint_; - } - /** - * optional bool rely_constraint = 5; - */ - public Builder setRelyConstraint(boolean value) { - bitField0_ |= 0x00000010; - relyConstraint_ = value; - onChanged(); - return this; - } - /** - * optional bool rely_constraint = 5; - */ - public Builder clearRelyConstraint() { - bitField0_ = (bitField0_ & ~0x00000010); - relyConstraint_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PrimaryKey) - } - - static { - defaultInstance = new PrimaryKey(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PrimaryKey) - } - - public interface ForeignKeysOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks 
= 1; - */ - java.util.List - getFksList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey getFks(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - int getFksCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - java.util.List - getFksOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKeyOrBuilder getFksOrBuilder( - int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ForeignKeys} - */ - public static final class ForeignKeys extends - com.google.protobuf.GeneratedMessage - implements ForeignKeysOrBuilder { - // Use ForeignKeys.newBuilder() to construct. - private ForeignKeys(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ForeignKeys(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ForeignKeys defaultInstance; - public static ForeignKeys getDefaultInstance() { - return defaultInstance; - } - - public ForeignKeys getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ForeignKeys( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - fks_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - fks_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - fks_ = java.util.Collections.unmodifiableList(fks_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_fieldAccessorTable - 
.ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ForeignKeys parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ForeignKeys(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface ForeignKeyOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string fk_name = 1; - /** - * required string fk_name = 1; - */ - boolean hasFkName(); - /** - * required string fk_name = 1; - */ - java.lang.String getFkName(); - /** - * required string fk_name = 1; - */ - com.google.protobuf.ByteString - getFkNameBytes(); - - // required string referenced_db_name = 2; - /** - * required string referenced_db_name = 2; - */ - boolean hasReferencedDbName(); - /** - * required string referenced_db_name = 2; - */ - java.lang.String getReferencedDbName(); - /** - * required string referenced_db_name = 2; - */ - com.google.protobuf.ByteString - getReferencedDbNameBytes(); - - // required string referenced_table_name = 3; - /** - * required string referenced_table_name = 3; - */ - boolean hasReferencedTableName(); - /** - * required string referenced_table_name = 3; - */ - java.lang.String getReferencedTableName(); - /** - * required string referenced_table_name = 3; - */ - com.google.protobuf.ByteString - getReferencedTableNameBytes(); - - // optional string referenced_pk_name = 4; - /** - * optional string referenced_pk_name = 4; - */ - boolean hasReferencedPkName(); - /** - * optional string referenced_pk_name = 4; - */ - java.lang.String getReferencedPkName(); - /** - * optional string referenced_pk_name = 4; - */ - com.google.protobuf.ByteString - getReferencedPkNameBytes(); - - // optional int32 update_rule = 5; - /** - * optional int32 update_rule = 5; - */ - boolean hasUpdateRule(); - /** - * optional int32 update_rule = 5; - */ - int getUpdateRule(); - - // optional int32 delete_rule = 6; - /** - * optional int32 delete_rule = 6; - */ - boolean hasDeleteRule(); - /** - * optional int32 delete_rule = 6; - */ - int getDeleteRule(); - - // repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - java.util.List - getColsList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn getCols(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - int getColsCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - java.util.List - getColsOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumnOrBuilder getColsOrBuilder( - int index); - - // optional bool 
enable_constraint = 8; - /** - * optional bool enable_constraint = 8; - */ - boolean hasEnableConstraint(); - /** - * optional bool enable_constraint = 8; - */ - boolean getEnableConstraint(); - - // optional bool validate_constraint = 9; - /** - * optional bool validate_constraint = 9; - */ - boolean hasValidateConstraint(); - /** - * optional bool validate_constraint = 9; - */ - boolean getValidateConstraint(); - - // optional bool rely_constraint = 10; - /** - * optional bool rely_constraint = 10; - */ - boolean hasRelyConstraint(); - /** - * optional bool rely_constraint = 10; - */ - boolean getRelyConstraint(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey} - */ - public static final class ForeignKey extends - com.google.protobuf.GeneratedMessage - implements ForeignKeyOrBuilder { - // Use ForeignKey.newBuilder() to construct. - private ForeignKey(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ForeignKey(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ForeignKey defaultInstance; - public static ForeignKey getDefaultInstance() { - return defaultInstance; - } - - public ForeignKey getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ForeignKey( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - fkName_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - referencedDbName_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - referencedTableName_ = input.readBytes(); - break; - } - case 34: { - bitField0_ |= 0x00000008; - referencedPkName_ = input.readBytes(); - break; - } - case 40: { - bitField0_ |= 0x00000010; - updateRule_ = input.readInt32(); - break; - } - case 48: { - bitField0_ |= 0x00000020; - deleteRule_ = input.readInt32(); - break; - } - case 58: { - if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { - cols_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000040; - } - cols_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.PARSER, extensionRegistry)); - break; - } - case 64: { - bitField0_ |= 0x00000040; - enableConstraint_ = input.readBool(); - break; - } - case 72: { - bitField0_ |= 0x00000080; - validateConstraint_ = input.readBool(); - break; - } - case 80: { - bitField0_ |= 0x00000100; - relyConstraint_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - 
e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { - cols_ = java.util.Collections.unmodifiableList(cols_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ForeignKey parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ForeignKey(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface ForeignKeyColumnOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string column_name = 1; - /** - * required string column_name = 1; - */ - boolean hasColumnName(); - /** - * required string column_name = 1; - */ - java.lang.String getColumnName(); - /** - * required string column_name = 1; - */ - com.google.protobuf.ByteString - getColumnNameBytes(); - - // required string referenced_column_name = 2; - /** - * required string referenced_column_name = 2; - */ - boolean hasReferencedColumnName(); - /** - * required string referenced_column_name = 2; - */ - java.lang.String getReferencedColumnName(); - /** - * required string referenced_column_name = 2; - */ - com.google.protobuf.ByteString - getReferencedColumnNameBytes(); - - // required sint32 key_seq = 3; - /** - * required sint32 key_seq = 3; - */ - boolean hasKeySeq(); - /** - * required sint32 key_seq = 3; - */ - int getKeySeq(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn} - */ - public static final class ForeignKeyColumn extends - com.google.protobuf.GeneratedMessage - implements ForeignKeyColumnOrBuilder { - // Use ForeignKeyColumn.newBuilder() to construct. 
- private ForeignKeyColumn(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ForeignKeyColumn(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ForeignKeyColumn defaultInstance; - public static ForeignKeyColumn getDefaultInstance() { - return defaultInstance; - } - - public ForeignKeyColumn getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ForeignKeyColumn( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - columnName_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - referencedColumnName_ = input.readBytes(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - keySeq_ = input.readSInt32(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_ForeignKeyColumn_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_ForeignKeyColumn_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ForeignKeyColumn parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ForeignKeyColumn(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string column_name = 1; - public static final int COLUMN_NAME_FIELD_NUMBER = 1; - private java.lang.Object columnName_; - /** - * required string column_name = 1; - */ - public boolean hasColumnName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - 
/** - * required string column_name = 1; - */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - columnName_ = s; - } - return s; - } - } - /** - * required string column_name = 1; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string referenced_column_name = 2; - public static final int REFERENCED_COLUMN_NAME_FIELD_NUMBER = 2; - private java.lang.Object referencedColumnName_; - /** - * required string referenced_column_name = 2; - */ - public boolean hasReferencedColumnName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string referenced_column_name = 2; - */ - public java.lang.String getReferencedColumnName() { - java.lang.Object ref = referencedColumnName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - referencedColumnName_ = s; - } - return s; - } - } - /** - * required string referenced_column_name = 2; - */ - public com.google.protobuf.ByteString - getReferencedColumnNameBytes() { - java.lang.Object ref = referencedColumnName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - referencedColumnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required sint32 key_seq = 3; - public static final int KEY_SEQ_FIELD_NUMBER = 3; - private int keySeq_; - /** - * required sint32 key_seq = 3; - */ - public boolean hasKeySeq() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required sint32 key_seq = 3; - */ - public int getKeySeq() { - return keySeq_; - } - - private void initFields() { - columnName_ = ""; - referencedColumnName_ = ""; - keySeq_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasColumnName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasReferencedColumnName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasKeySeq()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getColumnNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getReferencedColumnNameBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeSInt32(3, keySeq_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if 
(((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getColumnNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getReferencedColumnNameBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeSInt32Size(3, keySeq_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, 
extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumnOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_ForeignKeyColumn_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_ForeignKeyColumn_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - columnName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - referencedColumnName_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - keySeq_ = 0; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_ForeignKeyColumn_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.columnName_ = columnName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.referencedColumnName_ = referencedColumnName_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.keySeq_ = keySeq_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.getDefaultInstance()) return this; - if (other.hasColumnName()) { - bitField0_ |= 0x00000001; - columnName_ = other.columnName_; - onChanged(); - } - if (other.hasReferencedColumnName()) { - bitField0_ |= 0x00000002; - referencedColumnName_ = other.referencedColumnName_; - onChanged(); - } - if (other.hasKeySeq()) { - setKeySeq(other.getKeySeq()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasColumnName()) { - - return false; - } - if (!hasReferencedColumnName()) { - - return false; - } - if (!hasKeySeq()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string column_name = 1; - private java.lang.Object columnName_ = ""; - /** - * required string column_name = 1; - */ - public boolean hasColumnName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string column_name = 1; - */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - columnName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string column_name = 1; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = 
columnName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string column_name = 1; - */ - public Builder setColumnName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - columnName_ = value; - onChanged(); - return this; - } - /** - * required string column_name = 1; - */ - public Builder clearColumnName() { - bitField0_ = (bitField0_ & ~0x00000001); - columnName_ = getDefaultInstance().getColumnName(); - onChanged(); - return this; - } - /** - * required string column_name = 1; - */ - public Builder setColumnNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - columnName_ = value; - onChanged(); - return this; - } - - // required string referenced_column_name = 2; - private java.lang.Object referencedColumnName_ = ""; - /** - * required string referenced_column_name = 2; - */ - public boolean hasReferencedColumnName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string referenced_column_name = 2; - */ - public java.lang.String getReferencedColumnName() { - java.lang.Object ref = referencedColumnName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - referencedColumnName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string referenced_column_name = 2; - */ - public com.google.protobuf.ByteString - getReferencedColumnNameBytes() { - java.lang.Object ref = referencedColumnName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - referencedColumnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string referenced_column_name = 2; - */ - public Builder setReferencedColumnName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - referencedColumnName_ = value; - onChanged(); - return this; - } - /** - * required string referenced_column_name = 2; - */ - public Builder clearReferencedColumnName() { - bitField0_ = (bitField0_ & ~0x00000002); - referencedColumnName_ = getDefaultInstance().getReferencedColumnName(); - onChanged(); - return this; - } - /** - * required string referenced_column_name = 2; - */ - public Builder setReferencedColumnNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - referencedColumnName_ = value; - onChanged(); - return this; - } - - // required sint32 key_seq = 3; - private int keySeq_ ; - /** - * required sint32 key_seq = 3; - */ - public boolean hasKeySeq() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required sint32 key_seq = 3; - */ - public int getKeySeq() { - return keySeq_; - } - /** - * required sint32 key_seq = 3; - */ - public Builder setKeySeq(int value) { - bitField0_ |= 0x00000004; - keySeq_ = value; - onChanged(); - return this; - } - /** - * required sint32 key_seq = 3; - */ - public Builder clearKeySeq() { - bitField0_ = (bitField0_ & ~0x00000004); - keySeq_ = 0; - onChanged(); - return this; - } - - // 
@@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn) - } - - static { - defaultInstance = new ForeignKeyColumn(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn) - } - - private int bitField0_; - // required string fk_name = 1; - public static final int FK_NAME_FIELD_NUMBER = 1; - private java.lang.Object fkName_; - /** - * required string fk_name = 1; - */ - public boolean hasFkName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string fk_name = 1; - */ - public java.lang.String getFkName() { - java.lang.Object ref = fkName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - fkName_ = s; - } - return s; - } - } - /** - * required string fk_name = 1; - */ - public com.google.protobuf.ByteString - getFkNameBytes() { - java.lang.Object ref = fkName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - fkName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string referenced_db_name = 2; - public static final int REFERENCED_DB_NAME_FIELD_NUMBER = 2; - private java.lang.Object referencedDbName_; - /** - * required string referenced_db_name = 2; - */ - public boolean hasReferencedDbName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string referenced_db_name = 2; - */ - public java.lang.String getReferencedDbName() { - java.lang.Object ref = referencedDbName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - referencedDbName_ = s; - } - return s; - } - } - /** - * required string referenced_db_name = 2; - */ - public com.google.protobuf.ByteString - getReferencedDbNameBytes() { - java.lang.Object ref = referencedDbName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - referencedDbName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string referenced_table_name = 3; - public static final int REFERENCED_TABLE_NAME_FIELD_NUMBER = 3; - private java.lang.Object referencedTableName_; - /** - * required string referenced_table_name = 3; - */ - public boolean hasReferencedTableName() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required string referenced_table_name = 3; - */ - public java.lang.String getReferencedTableName() { - java.lang.Object ref = referencedTableName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - referencedTableName_ = s; - } - return s; - } - } - /** - * required string referenced_table_name = 3; - */ - public com.google.protobuf.ByteString - getReferencedTableNameBytes() { - java.lang.Object ref = referencedTableName_; - if (ref instanceof java.lang.String) { - 
com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - referencedTableName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string referenced_pk_name = 4; - public static final int REFERENCED_PK_NAME_FIELD_NUMBER = 4; - private java.lang.Object referencedPkName_; - /** - * optional string referenced_pk_name = 4; - */ - public boolean hasReferencedPkName() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional string referenced_pk_name = 4; - */ - public java.lang.String getReferencedPkName() { - java.lang.Object ref = referencedPkName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - referencedPkName_ = s; - } - return s; - } - } - /** - * optional string referenced_pk_name = 4; - */ - public com.google.protobuf.ByteString - getReferencedPkNameBytes() { - java.lang.Object ref = referencedPkName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - referencedPkName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional int32 update_rule = 5; - public static final int UPDATE_RULE_FIELD_NUMBER = 5; - private int updateRule_; - /** - * optional int32 update_rule = 5; - */ - public boolean hasUpdateRule() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional int32 update_rule = 5; - */ - public int getUpdateRule() { - return updateRule_; - } - - // optional int32 delete_rule = 6; - public static final int DELETE_RULE_FIELD_NUMBER = 6; - private int deleteRule_; - /** - * optional int32 delete_rule = 6; - */ - public boolean hasDeleteRule() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional int32 delete_rule = 6; - */ - public int getDeleteRule() { - return deleteRule_; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - public static final int COLS_FIELD_NUMBER = 7; - private java.util.List cols_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public java.util.List getColsList() { - return cols_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public java.util.List - getColsOrBuilderList() { - return cols_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public int getColsCount() { - return cols_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn getCols(int index) { - return cols_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumnOrBuilder getColsOrBuilder( - int index) { - return cols_.get(index); - } - - // optional bool enable_constraint = 8; - public static final int ENABLE_CONSTRAINT_FIELD_NUMBER = 8; - private boolean enableConstraint_; - /** - * optional 
bool enable_constraint = 8; - */ - public boolean hasEnableConstraint() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional bool enable_constraint = 8; - */ - public boolean getEnableConstraint() { - return enableConstraint_; - } - - // optional bool validate_constraint = 9; - public static final int VALIDATE_CONSTRAINT_FIELD_NUMBER = 9; - private boolean validateConstraint_; - /** - * optional bool validate_constraint = 9; - */ - public boolean hasValidateConstraint() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional bool validate_constraint = 9; - */ - public boolean getValidateConstraint() { - return validateConstraint_; - } - - // optional bool rely_constraint = 10; - public static final int RELY_CONSTRAINT_FIELD_NUMBER = 10; - private boolean relyConstraint_; - /** - * optional bool rely_constraint = 10; - */ - public boolean hasRelyConstraint() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - * optional bool rely_constraint = 10; - */ - public boolean getRelyConstraint() { - return relyConstraint_; - } - - private void initFields() { - fkName_ = ""; - referencedDbName_ = ""; - referencedTableName_ = ""; - referencedPkName_ = ""; - updateRule_ = 0; - deleteRule_ = 0; - cols_ = java.util.Collections.emptyList(); - enableConstraint_ = false; - validateConstraint_ = false; - relyConstraint_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasFkName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasReferencedDbName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasReferencedTableName()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getColsCount(); i++) { - if (!getCols(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getFkNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getReferencedDbNameBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getReferencedTableNameBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBytes(4, getReferencedPkNameBytes()); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeInt32(5, updateRule_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeInt32(6, deleteRule_); - } - for (int i = 0; i < cols_.size(); i++) { - output.writeMessage(7, cols_.get(i)); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeBool(8, enableConstraint_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeBool(9, validateConstraint_); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeBool(10, relyConstraint_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getFkNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += 
com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getReferencedDbNameBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getReferencedTableNameBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(4, getReferencedPkNameBytes()); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(5, updateRule_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(6, deleteRule_); - } - for (int i = 0; i < cols_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(7, cols_.get(i)); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(8, enableConstraint_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(9, validateConstraint_); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(10, relyConstraint_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey parseDelimitedFrom( - java.io.InputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKeyOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getColsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - fkName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - referencedDbName_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - referencedTableName_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - referencedPkName_ = ""; - bitField0_ = (bitField0_ & ~0x00000008); - updateRule_ = 0; - bitField0_ = (bitField0_ & ~0x00000010); - deleteRule_ = 0; - bitField0_ = (bitField0_ & ~0x00000020); - if (colsBuilder_ == null) { - cols_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000040); - } else { - colsBuilder_.clear(); - } - enableConstraint_ = false; - bitField0_ = (bitField0_ & ~0x00000080); - validateConstraint_ = false; - bitField0_ = (bitField0_ & 
~0x00000100); - relyConstraint_ = false; - bitField0_ = (bitField0_ & ~0x00000200); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.fkName_ = fkName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.referencedDbName_ = referencedDbName_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.referencedTableName_ = referencedTableName_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.referencedPkName_ = referencedPkName_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.updateRule_ = updateRule_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.deleteRule_ = deleteRule_; - if (colsBuilder_ == null) { - if (((bitField0_ & 0x00000040) == 0x00000040)) { - cols_ = java.util.Collections.unmodifiableList(cols_); - bitField0_ = (bitField0_ & ~0x00000040); - } - result.cols_ = cols_; - } else { - result.cols_ = colsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000040; - } - result.enableConstraint_ = enableConstraint_; - if (((from_bitField0_ & 0x00000100) == 0x00000100)) { - to_bitField0_ |= 0x00000080; - } - result.validateConstraint_ = validateConstraint_; - if (((from_bitField0_ & 0x00000200) == 0x00000200)) { - to_bitField0_ |= 0x00000100; - } - result.relyConstraint_ = relyConstraint_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.getDefaultInstance()) return this; - if (other.hasFkName()) { - bitField0_ |= 0x00000001; - fkName_ = 
other.fkName_; - onChanged(); - } - if (other.hasReferencedDbName()) { - bitField0_ |= 0x00000002; - referencedDbName_ = other.referencedDbName_; - onChanged(); - } - if (other.hasReferencedTableName()) { - bitField0_ |= 0x00000004; - referencedTableName_ = other.referencedTableName_; - onChanged(); - } - if (other.hasReferencedPkName()) { - bitField0_ |= 0x00000008; - referencedPkName_ = other.referencedPkName_; - onChanged(); - } - if (other.hasUpdateRule()) { - setUpdateRule(other.getUpdateRule()); - } - if (other.hasDeleteRule()) { - setDeleteRule(other.getDeleteRule()); - } - if (colsBuilder_ == null) { - if (!other.cols_.isEmpty()) { - if (cols_.isEmpty()) { - cols_ = other.cols_; - bitField0_ = (bitField0_ & ~0x00000040); - } else { - ensureColsIsMutable(); - cols_.addAll(other.cols_); - } - onChanged(); - } - } else { - if (!other.cols_.isEmpty()) { - if (colsBuilder_.isEmpty()) { - colsBuilder_.dispose(); - colsBuilder_ = null; - cols_ = other.cols_; - bitField0_ = (bitField0_ & ~0x00000040); - colsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getColsFieldBuilder() : null; - } else { - colsBuilder_.addAllMessages(other.cols_); - } - } - } - if (other.hasEnableConstraint()) { - setEnableConstraint(other.getEnableConstraint()); - } - if (other.hasValidateConstraint()) { - setValidateConstraint(other.getValidateConstraint()); - } - if (other.hasRelyConstraint()) { - setRelyConstraint(other.getRelyConstraint()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasFkName()) { - - return false; - } - if (!hasReferencedDbName()) { - - return false; - } - if (!hasReferencedTableName()) { - - return false; - } - for (int i = 0; i < getColsCount(); i++) { - if (!getCols(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string fk_name = 1; - private java.lang.Object fkName_ = ""; - /** - * required string fk_name = 1; - */ - public boolean hasFkName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string fk_name = 1; - */ - public java.lang.String getFkName() { - java.lang.Object ref = fkName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - fkName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string fk_name = 1; - */ - public com.google.protobuf.ByteString - getFkNameBytes() { - java.lang.Object ref = fkName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - fkName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string fk_name = 1; - */ - public Builder setFkName( - java.lang.String 
value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - fkName_ = value; - onChanged(); - return this; - } - /** - * required string fk_name = 1; - */ - public Builder clearFkName() { - bitField0_ = (bitField0_ & ~0x00000001); - fkName_ = getDefaultInstance().getFkName(); - onChanged(); - return this; - } - /** - * required string fk_name = 1; - */ - public Builder setFkNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - fkName_ = value; - onChanged(); - return this; - } - - // required string referenced_db_name = 2; - private java.lang.Object referencedDbName_ = ""; - /** - * required string referenced_db_name = 2; - */ - public boolean hasReferencedDbName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string referenced_db_name = 2; - */ - public java.lang.String getReferencedDbName() { - java.lang.Object ref = referencedDbName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - referencedDbName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string referenced_db_name = 2; - */ - public com.google.protobuf.ByteString - getReferencedDbNameBytes() { - java.lang.Object ref = referencedDbName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - referencedDbName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string referenced_db_name = 2; - */ - public Builder setReferencedDbName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - referencedDbName_ = value; - onChanged(); - return this; - } - /** - * required string referenced_db_name = 2; - */ - public Builder clearReferencedDbName() { - bitField0_ = (bitField0_ & ~0x00000002); - referencedDbName_ = getDefaultInstance().getReferencedDbName(); - onChanged(); - return this; - } - /** - * required string referenced_db_name = 2; - */ - public Builder setReferencedDbNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - referencedDbName_ = value; - onChanged(); - return this; - } - - // required string referenced_table_name = 3; - private java.lang.Object referencedTableName_ = ""; - /** - * required string referenced_table_name = 3; - */ - public boolean hasReferencedTableName() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required string referenced_table_name = 3; - */ - public java.lang.String getReferencedTableName() { - java.lang.Object ref = referencedTableName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - referencedTableName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string referenced_table_name = 3; - */ - public com.google.protobuf.ByteString - getReferencedTableNameBytes() { - java.lang.Object ref = referencedTableName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - referencedTableName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string 
referenced_table_name = 3; - */ - public Builder setReferencedTableName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - referencedTableName_ = value; - onChanged(); - return this; - } - /** - * required string referenced_table_name = 3; - */ - public Builder clearReferencedTableName() { - bitField0_ = (bitField0_ & ~0x00000004); - referencedTableName_ = getDefaultInstance().getReferencedTableName(); - onChanged(); - return this; - } - /** - * required string referenced_table_name = 3; - */ - public Builder setReferencedTableNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - referencedTableName_ = value; - onChanged(); - return this; - } - - // optional string referenced_pk_name = 4; - private java.lang.Object referencedPkName_ = ""; - /** - * optional string referenced_pk_name = 4; - */ - public boolean hasReferencedPkName() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional string referenced_pk_name = 4; - */ - public java.lang.String getReferencedPkName() { - java.lang.Object ref = referencedPkName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - referencedPkName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string referenced_pk_name = 4; - */ - public com.google.protobuf.ByteString - getReferencedPkNameBytes() { - java.lang.Object ref = referencedPkName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - referencedPkName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string referenced_pk_name = 4; - */ - public Builder setReferencedPkName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - referencedPkName_ = value; - onChanged(); - return this; - } - /** - * optional string referenced_pk_name = 4; - */ - public Builder clearReferencedPkName() { - bitField0_ = (bitField0_ & ~0x00000008); - referencedPkName_ = getDefaultInstance().getReferencedPkName(); - onChanged(); - return this; - } - /** - * optional string referenced_pk_name = 4; - */ - public Builder setReferencedPkNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - referencedPkName_ = value; - onChanged(); - return this; - } - - // optional int32 update_rule = 5; - private int updateRule_ ; - /** - * optional int32 update_rule = 5; - */ - public boolean hasUpdateRule() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional int32 update_rule = 5; - */ - public int getUpdateRule() { - return updateRule_; - } - /** - * optional int32 update_rule = 5; - */ - public Builder setUpdateRule(int value) { - bitField0_ |= 0x00000010; - updateRule_ = value; - onChanged(); - return this; - } - /** - * optional int32 update_rule = 5; - */ - public Builder clearUpdateRule() { - bitField0_ = (bitField0_ & ~0x00000010); - updateRule_ = 0; - onChanged(); - return this; - } - - // optional int32 delete_rule = 6; - private int deleteRule_ ; - /** - * optional int32 delete_rule = 6; - */ - public boolean hasDeleteRule() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional int32 
delete_rule = 6; - */ - public int getDeleteRule() { - return deleteRule_; - } - /** - * optional int32 delete_rule = 6; - */ - public Builder setDeleteRule(int value) { - bitField0_ |= 0x00000020; - deleteRule_ = value; - onChanged(); - return this; - } - /** - * optional int32 delete_rule = 6; - */ - public Builder clearDeleteRule() { - bitField0_ = (bitField0_ & ~0x00000020); - deleteRule_ = 0; - onChanged(); - return this; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - private java.util.List cols_ = - java.util.Collections.emptyList(); - private void ensureColsIsMutable() { - if (!((bitField0_ & 0x00000040) == 0x00000040)) { - cols_ = new java.util.ArrayList(cols_); - bitField0_ |= 0x00000040; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumnOrBuilder> colsBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public java.util.List getColsList() { - if (colsBuilder_ == null) { - return java.util.Collections.unmodifiableList(cols_); - } else { - return colsBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public int getColsCount() { - if (colsBuilder_ == null) { - return cols_.size(); - } else { - return colsBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn getCols(int index) { - if (colsBuilder_ == null) { - return cols_.get(index); - } else { - return colsBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public Builder setCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.set(index, value); - onChanged(); - } else { - colsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public Builder setCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.set(index, builderForValue.build()); - onChanged(); - } else { - colsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public Builder addCols(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.add(value); - onChanged(); - } else { - colsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public Builder addCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.add(index, value); - onChanged(); - } else { - colsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public Builder addCols( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.add(builderForValue.build()); - onChanged(); - } else { - colsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public Builder addCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.add(index, builderForValue.build()); - onChanged(); - } else { - colsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public Builder addAllCols( - java.lang.Iterable values) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - super.addAll(values, cols_); - onChanged(); - } else { - colsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public Builder clearCols() { - if (colsBuilder_ == null) { - cols_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000040); - onChanged(); - } else { - colsBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public Builder removeCols(int index) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.remove(index); - onChanged(); - } else { - colsBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.Builder getColsBuilder( - int index) { - return getColsFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumnOrBuilder getColsOrBuilder( - int index) { - if (colsBuilder_ == null) { - return cols_.get(index); } else { - return colsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public java.util.List - getColsOrBuilderList() { - if (colsBuilder_ != null) { - return colsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(cols_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - 
*/ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.Builder addColsBuilder() { - return getColsFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.Builder addColsBuilder( - int index) { - return getColsFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey.ForeignKeyColumn cols = 7; - */ - public java.util.List - getColsBuilderList() { - return getColsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumnOrBuilder> - getColsFieldBuilder() { - if (colsBuilder_ == null) { - colsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumnOrBuilder>( - cols_, - ((bitField0_ & 0x00000040) == 0x00000040), - getParentForChildren(), - isClean()); - cols_ = null; - } - return colsBuilder_; - } - - // optional bool enable_constraint = 8; - private boolean enableConstraint_ ; - /** - * optional bool enable_constraint = 8; - */ - public boolean hasEnableConstraint() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional bool enable_constraint = 8; - */ - public boolean getEnableConstraint() { - return enableConstraint_; - } - /** - * optional bool enable_constraint = 8; - */ - public Builder setEnableConstraint(boolean value) { - bitField0_ |= 0x00000080; - enableConstraint_ = value; - onChanged(); - return this; - } - /** - * optional bool enable_constraint = 8; - */ - public Builder clearEnableConstraint() { - bitField0_ = (bitField0_ & ~0x00000080); - enableConstraint_ = false; - onChanged(); - return this; - } - - // optional bool validate_constraint = 9; - private boolean validateConstraint_ ; - /** - * optional bool validate_constraint = 9; - */ - public boolean hasValidateConstraint() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - * optional bool validate_constraint = 9; - */ - public boolean getValidateConstraint() { - return validateConstraint_; - } - /** - * optional bool validate_constraint = 9; - */ - public Builder setValidateConstraint(boolean value) { - bitField0_ |= 0x00000100; - validateConstraint_ = value; - onChanged(); - return this; - } - /** - * optional bool validate_constraint = 9; - */ - public Builder clearValidateConstraint() { - bitField0_ = (bitField0_ & ~0x00000100); - validateConstraint_ = false; - onChanged(); - return this; - } - - // optional bool rely_constraint = 10; - private boolean relyConstraint_ ; - /** - * optional bool 
rely_constraint = 10; - */ - public boolean hasRelyConstraint() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - /** - * optional bool rely_constraint = 10; - */ - public boolean getRelyConstraint() { - return relyConstraint_; - } - /** - * optional bool rely_constraint = 10; - */ - public Builder setRelyConstraint(boolean value) { - bitField0_ |= 0x00000200; - relyConstraint_ = value; - onChanged(); - return this; - } - /** - * optional bool rely_constraint = 10; - */ - public Builder clearRelyConstraint() { - bitField0_ = (bitField0_ & ~0x00000200); - relyConstraint_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey) - } - - static { - defaultInstance = new ForeignKey(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey) - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - public static final int FKS_FIELD_NUMBER = 1; - private java.util.List fks_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public java.util.List getFksList() { - return fks_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public java.util.List - getFksOrBuilderList() { - return fks_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public int getFksCount() { - return fks_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey getFks(int index) { - return fks_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKeyOrBuilder getFksOrBuilder( - int index) { - return fks_.get(index); - } - - private void initFields() { - fks_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getFksCount(); i++) { - if (!getFks(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < fks_.size(); i++) { - output.writeMessage(1, fks_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < fks_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, fks_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys parseFrom( - com.google.protobuf.ByteString data) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ForeignKeys} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeysOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() 
{ - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getFksFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (fksBuilder_ == null) { - fks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - fksBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys(this); - int from_bitField0_ = bitField0_; - if (fksBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - fks_ = java.util.Collections.unmodifiableList(fks_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.fks_ = fks_; - } else { - result.fks_ = fksBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.getDefaultInstance()) return this; - if (fksBuilder_ == null) { - if (!other.fks_.isEmpty()) { - if (fks_.isEmpty()) { - fks_ = other.fks_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureFksIsMutable(); - fks_.addAll(other.fks_); - } - onChanged(); - } - } else { - if (!other.fks_.isEmpty()) { - if (fksBuilder_.isEmpty()) { - fksBuilder_.dispose(); - fksBuilder_ = null; - fks_ = other.fks_; - bitField0_ = (bitField0_ & ~0x00000001); - fksBuilder_ = - 
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getFksFieldBuilder() : null; - } else { - fksBuilder_.addAllMessages(other.fks_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getFksCount(); i++) { - if (!getFks(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - private java.util.List fks_ = - java.util.Collections.emptyList(); - private void ensureFksIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - fks_ = new java.util.ArrayList(fks_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKeyOrBuilder> fksBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public java.util.List getFksList() { - if (fksBuilder_ == null) { - return java.util.Collections.unmodifiableList(fks_); - } else { - return fksBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public int getFksCount() { - if (fksBuilder_ == null) { - return fks_.size(); - } else { - return fksBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey getFks(int index) { - if (fksBuilder_ == null) { - return fks_.get(index); - } else { - return fksBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public Builder setFks( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey value) { - if (fksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFksIsMutable(); - fks_.set(index, value); - onChanged(); - } else { - fksBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public Builder setFks( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder builderForValue) { - if (fksBuilder_ == null) { - ensureFksIsMutable(); - fks_.set(index, builderForValue.build()); - onChanged(); - } else { - fksBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public 
Builder addFks(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey value) { - if (fksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFksIsMutable(); - fks_.add(value); - onChanged(); - } else { - fksBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public Builder addFks( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey value) { - if (fksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFksIsMutable(); - fks_.add(index, value); - onChanged(); - } else { - fksBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public Builder addFks( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder builderForValue) { - if (fksBuilder_ == null) { - ensureFksIsMutable(); - fks_.add(builderForValue.build()); - onChanged(); - } else { - fksBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public Builder addFks( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder builderForValue) { - if (fksBuilder_ == null) { - ensureFksIsMutable(); - fks_.add(index, builderForValue.build()); - onChanged(); - } else { - fksBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public Builder addAllFks( - java.lang.Iterable values) { - if (fksBuilder_ == null) { - ensureFksIsMutable(); - super.addAll(values, fks_); - onChanged(); - } else { - fksBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public Builder clearFks() { - if (fksBuilder_ == null) { - fks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - fksBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public Builder removeFks(int index) { - if (fksBuilder_ == null) { - ensureFksIsMutable(); - fks_.remove(index); - onChanged(); - } else { - fksBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder getFksBuilder( - int index) { - return getFksFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKeyOrBuilder getFksOrBuilder( - int index) { - if (fksBuilder_ == null) { - return fks_.get(index); } else { - return fksBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public java.util.List - getFksOrBuilderList() { - if (fksBuilder_ != null) { - return fksBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(fks_); - } - } - /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder addFksBuilder() { - return getFksFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder addFksBuilder( - int index) { - return getFksFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.ForeignKeys.ForeignKey fks = 1; - */ - public java.util.List - getFksBuilderList() { - return getFksFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKeyOrBuilder> - getFksFieldBuilder() { - if (fksBuilder_ == null) { - fksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ForeignKeys.ForeignKeyOrBuilder>( - fks_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - fks_ = null; - } - return fksBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ForeignKeys) - } - - static { - defaultInstance = new ForeignKeys(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ForeignKeys) - } - - public interface UniqueConstraintsOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - java.util.List - getUksList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint getUks(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - int getUksCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - java.util.List - getUksOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraintOrBuilder getUksOrBuilder( - int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.UniqueConstraints} - */ - public static final class UniqueConstraints extends - com.google.protobuf.GeneratedMessage - implements UniqueConstraintsOrBuilder { - // Use UniqueConstraints.newBuilder() to construct. 
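
For context only, a minimal sketch (not part of this patch) of how the generated ForeignKeys API deleted above was typically driven, assuming protobuf 2.x builder semantics; the wrapper class name, the literal values, and the idea that the resulting byte[] was stored as a single HBase cell value are hypothetical:

import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

public class ForeignKeysSketch {
  // Build a ForeignKeys message; build() enforces the required fk_name,
  // referenced_db_name and referenced_table_name fields checked in isInitialized().
  static byte[] toBytes() {
    HbaseMetastoreProto.ForeignKeys.ForeignKey fk =
        HbaseMetastoreProto.ForeignKeys.ForeignKey.newBuilder()
            .setFkName("fk_orders_customers")      // hypothetical constraint name
            .setReferencedDbName("default")
            .setReferencedTableName("customers")
            .setUpdateRule(0)
            .setDeleteRule(0)
            .setRelyConstraint(true)
            .build();
    return HbaseMetastoreProto.ForeignKeys.newBuilder()
        .addFks(fk)                                // repeated ForeignKey fks = 1
        .build()
        .toByteArray();
  }

  // Parse the serialized form back; the required-field checks run again on the wire data.
  static HbaseMetastoreProto.ForeignKeys fromBytes(byte[] value)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return HbaseMetastoreProto.ForeignKeys.parseFrom(value);
  }
}
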
- private UniqueConstraints(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private UniqueConstraints(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final UniqueConstraints defaultInstance; - public static UniqueConstraints getDefaultInstance() { - return defaultInstance; - } - - public UniqueConstraints getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private UniqueConstraints( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - uks_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - uks_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - uks_ = java.util.Collections.unmodifiableList(uks_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public UniqueConstraints parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new UniqueConstraints(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface UniqueConstraintOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string uk_name = 1; - /** - * required string uk_name = 1; - */ - boolean hasUkName(); - /** - * required string 
uk_name = 1; - */ - java.lang.String getUkName(); - /** - * required string uk_name = 1; - */ - com.google.protobuf.ByteString - getUkNameBytes(); - - // repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - java.util.List - getColsList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn getCols(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - int getColsCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - java.util.List - getColsOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumnOrBuilder getColsOrBuilder( - int index); - - // optional bool enable_constraint = 3; - /** - * optional bool enable_constraint = 3; - */ - boolean hasEnableConstraint(); - /** - * optional bool enable_constraint = 3; - */ - boolean getEnableConstraint(); - - // optional bool validate_constraint = 4; - /** - * optional bool validate_constraint = 4; - */ - boolean hasValidateConstraint(); - /** - * optional bool validate_constraint = 4; - */ - boolean getValidateConstraint(); - - // optional bool rely_constraint = 5; - /** - * optional bool rely_constraint = 5; - */ - boolean hasRelyConstraint(); - /** - * optional bool rely_constraint = 5; - */ - boolean getRelyConstraint(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint} - */ - public static final class UniqueConstraint extends - com.google.protobuf.GeneratedMessage - implements UniqueConstraintOrBuilder { - // Use UniqueConstraint.newBuilder() to construct. 
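
Likewise, a minimal sketch (again not from the patch) of the UniqueConstraints / UniqueConstraint / UniqueConstraintColumn messages whose removal continues below, assuming the same protobuf 2.x generated builder pattern (including an addUks overload analogous to addFks above); the constraint and column names are hypothetical:

import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints;
import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint;
import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn;

public class UniqueConstraintsSketch {
  static UniqueConstraints build() {
    UniqueConstraintColumn col = UniqueConstraintColumn.newBuilder()
        .setColumnName("email")             // required string column_name = 1
        .setKeySeq(1)                       // required sint32 key_seq = 2
        .build();
    UniqueConstraint uk = UniqueConstraint.newBuilder()
        .setUkName("uk_customers_email")    // required string uk_name = 1
        .addCols(col)                       // repeated UniqueConstraintColumn cols = 2
        .setEnableConstraint(false)
        .setValidateConstraint(false)
        .setRelyConstraint(true)
        .build();
    return UniqueConstraints.newBuilder().addUks(uk).build();
  }
}
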
- private UniqueConstraint(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private UniqueConstraint(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final UniqueConstraint defaultInstance; - public static UniqueConstraint getDefaultInstance() { - return defaultInstance; - } - - public UniqueConstraint getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private UniqueConstraint( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - ukName_ = input.readBytes(); - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - cols_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - cols_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.PARSER, extensionRegistry)); - break; - } - case 24: { - bitField0_ |= 0x00000002; - enableConstraint_ = input.readBool(); - break; - } - case 32: { - bitField0_ |= 0x00000004; - validateConstraint_ = input.readBool(); - break; - } - case 40: { - bitField0_ |= 0x00000008; - relyConstraint_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - cols_ = java.util.Collections.unmodifiableList(cols_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public UniqueConstraint parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - 
throws com.google.protobuf.InvalidProtocolBufferException { - return new UniqueConstraint(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface UniqueConstraintColumnOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string column_name = 1; - /** - * required string column_name = 1; - */ - boolean hasColumnName(); - /** - * required string column_name = 1; - */ - java.lang.String getColumnName(); - /** - * required string column_name = 1; - */ - com.google.protobuf.ByteString - getColumnNameBytes(); - - // required sint32 key_seq = 2; - /** - * required sint32 key_seq = 2; - */ - boolean hasKeySeq(); - /** - * required sint32 key_seq = 2; - */ - int getKeySeq(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn} - */ - public static final class UniqueConstraintColumn extends - com.google.protobuf.GeneratedMessage - implements UniqueConstraintColumnOrBuilder { - // Use UniqueConstraintColumn.newBuilder() to construct. - private UniqueConstraintColumn(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private UniqueConstraintColumn(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final UniqueConstraintColumn defaultInstance; - public static UniqueConstraintColumn getDefaultInstance() { - return defaultInstance; - } - - public UniqueConstraintColumn getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private UniqueConstraintColumn( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - columnName_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - keySeq_ = input.readSInt32(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_UniqueConstraintColumn_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_UniqueConstraintColumn_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public UniqueConstraintColumn parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new UniqueConstraintColumn(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string column_name = 1; - public static final int COLUMN_NAME_FIELD_NUMBER = 1; - private java.lang.Object columnName_; - /** - * required string column_name = 1; - */ - public boolean hasColumnName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string column_name = 1; - */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - columnName_ = s; - } - return s; - } - } - /** - * required string column_name = 1; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required sint32 key_seq = 2; - public static final int KEY_SEQ_FIELD_NUMBER = 2; - private int keySeq_; - /** - * required sint32 key_seq = 2; - */ - public boolean hasKeySeq() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required sint32 key_seq = 2; - */ - public int getKeySeq() { - return keySeq_; - } - - private void initFields() { - columnName_ = ""; - keySeq_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasColumnName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasKeySeq()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getColumnNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeSInt32(2, keySeq_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getColumnNameBytes()); - } - if 
(((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeSInt32Size(2, keySeq_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return 
newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumnOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_UniqueConstraintColumn_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_UniqueConstraintColumn_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - columnName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - keySeq_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_UniqueConstraintColumn_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.columnName_ = columnName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.keySeq_ = keySeq_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.getDefaultInstance()) return this; - if (other.hasColumnName()) { - bitField0_ |= 0x00000001; - columnName_ = other.columnName_; - onChanged(); - } - if (other.hasKeySeq()) { - setKeySeq(other.getKeySeq()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasColumnName()) { - - return false; - } - if (!hasKeySeq()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string column_name = 1; - private java.lang.Object columnName_ = ""; - /** - * required string column_name = 1; - */ - public boolean hasColumnName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string column_name = 1; - */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - columnName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string column_name = 1; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } 
else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string column_name = 1; - */ - public Builder setColumnName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - columnName_ = value; - onChanged(); - return this; - } - /** - * required string column_name = 1; - */ - public Builder clearColumnName() { - bitField0_ = (bitField0_ & ~0x00000001); - columnName_ = getDefaultInstance().getColumnName(); - onChanged(); - return this; - } - /** - * required string column_name = 1; - */ - public Builder setColumnNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - columnName_ = value; - onChanged(); - return this; - } - - // required sint32 key_seq = 2; - private int keySeq_ ; - /** - * required sint32 key_seq = 2; - */ - public boolean hasKeySeq() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required sint32 key_seq = 2; - */ - public int getKeySeq() { - return keySeq_; - } - /** - * required sint32 key_seq = 2; - */ - public Builder setKeySeq(int value) { - bitField0_ |= 0x00000002; - keySeq_ = value; - onChanged(); - return this; - } - /** - * required sint32 key_seq = 2; - */ - public Builder clearKeySeq() { - bitField0_ = (bitField0_ & ~0x00000002); - keySeq_ = 0; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn) - } - - static { - defaultInstance = new UniqueConstraintColumn(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn) - } - - private int bitField0_; - // required string uk_name = 1; - public static final int UK_NAME_FIELD_NUMBER = 1; - private java.lang.Object ukName_; - /** - * required string uk_name = 1; - */ - public boolean hasUkName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string uk_name = 1; - */ - public java.lang.String getUkName() { - java.lang.Object ref = ukName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - ukName_ = s; - } - return s; - } - } - /** - * required string uk_name = 1; - */ - public com.google.protobuf.ByteString - getUkNameBytes() { - java.lang.Object ref = ukName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - ukName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - public static final int COLS_FIELD_NUMBER = 2; - private java.util.List cols_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public java.util.List getColsList() { - return cols_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public java.util.List - getColsOrBuilderList() { - return cols_; - } - /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public int getColsCount() { - return cols_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn getCols(int index) { - return cols_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumnOrBuilder getColsOrBuilder( - int index) { - return cols_.get(index); - } - - // optional bool enable_constraint = 3; - public static final int ENABLE_CONSTRAINT_FIELD_NUMBER = 3; - private boolean enableConstraint_; - /** - * optional bool enable_constraint = 3; - */ - public boolean hasEnableConstraint() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bool enable_constraint = 3; - */ - public boolean getEnableConstraint() { - return enableConstraint_; - } - - // optional bool validate_constraint = 4; - public static final int VALIDATE_CONSTRAINT_FIELD_NUMBER = 4; - private boolean validateConstraint_; - /** - * optional bool validate_constraint = 4; - */ - public boolean hasValidateConstraint() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional bool validate_constraint = 4; - */ - public boolean getValidateConstraint() { - return validateConstraint_; - } - - // optional bool rely_constraint = 5; - public static final int RELY_CONSTRAINT_FIELD_NUMBER = 5; - private boolean relyConstraint_; - /** - * optional bool rely_constraint = 5; - */ - public boolean hasRelyConstraint() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bool rely_constraint = 5; - */ - public boolean getRelyConstraint() { - return relyConstraint_; - } - - private void initFields() { - ukName_ = ""; - cols_ = java.util.Collections.emptyList(); - enableConstraint_ = false; - validateConstraint_ = false; - relyConstraint_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasUkName()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getColsCount(); i++) { - if (!getCols(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getUkNameBytes()); - } - for (int i = 0; i < cols_.size(); i++) { - output.writeMessage(2, cols_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(3, enableConstraint_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(4, validateConstraint_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBool(5, relyConstraint_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += 
com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getUkNameBytes()); - } - for (int i = 0; i < cols_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, cols_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(3, enableConstraint_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(4, validateConstraint_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, relyConstraint_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint parseFrom( - 
com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraintOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getColsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - ukName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - if (colsBuilder_ == null) { - cols_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - colsBuilder_.clear(); - } - enableConstraint_ = false; - bitField0_ = (bitField0_ & ~0x00000004); - validateConstraint_ = false; - bitField0_ = (bitField0_ & ~0x00000008); - relyConstraint_ = false; - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.getDefaultInstance(); - } - - public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.ukName_ = ukName_; - if (colsBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - cols_ = java.util.Collections.unmodifiableList(cols_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.cols_ = cols_; - } else { - result.cols_ = colsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000002; - } - result.enableConstraint_ = enableConstraint_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000004; - } - result.validateConstraint_ = validateConstraint_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000008; - } - result.relyConstraint_ = relyConstraint_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.getDefaultInstance()) return this; - if (other.hasUkName()) { - bitField0_ |= 0x00000001; - ukName_ = other.ukName_; - onChanged(); - } - if (colsBuilder_ == null) { - if (!other.cols_.isEmpty()) { - if (cols_.isEmpty()) { - cols_ = other.cols_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureColsIsMutable(); - cols_.addAll(other.cols_); - } - onChanged(); - } - } else { - if (!other.cols_.isEmpty()) { - if (colsBuilder_.isEmpty()) { - colsBuilder_.dispose(); - colsBuilder_ = null; - cols_ = other.cols_; - bitField0_ = (bitField0_ & ~0x00000002); - colsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getColsFieldBuilder() : null; - } else { - colsBuilder_.addAllMessages(other.cols_); - } - } - } - if (other.hasEnableConstraint()) { - setEnableConstraint(other.getEnableConstraint()); - } - if (other.hasValidateConstraint()) { - setValidateConstraint(other.getValidateConstraint()); - } - if (other.hasRelyConstraint()) { - setRelyConstraint(other.getRelyConstraint()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasUkName()) { - - return false; - } - for (int i = 0; i < getColsCount(); i++) { - if (!getCols(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string uk_name = 1; - private java.lang.Object ukName_ = ""; - /** - * required string uk_name = 1; - */ - public boolean hasUkName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string uk_name = 1; - */ - public java.lang.String getUkName() { - java.lang.Object ref = ukName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - ukName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string uk_name = 1; - */ - public com.google.protobuf.ByteString - getUkNameBytes() { - java.lang.Object ref = ukName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - ukName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string uk_name = 1; - */ - public Builder setUkName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - ukName_ = value; - onChanged(); - return this; - } - /** - * required string uk_name = 1; - */ - public Builder clearUkName() { - bitField0_ = (bitField0_ & ~0x00000001); - ukName_ = getDefaultInstance().getUkName(); - onChanged(); - return this; - } - /** - * required string uk_name = 1; - */ - public Builder setUkNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - ukName_ = value; - onChanged(); - return this; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - private java.util.List cols_ = - java.util.Collections.emptyList(); - private void ensureColsIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - cols_ = new java.util.ArrayList(cols_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumnOrBuilder> colsBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public java.util.List getColsList() { - if (colsBuilder_ == null) { - return java.util.Collections.unmodifiableList(cols_); - } else { - return colsBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public int getColsCount() { - if (colsBuilder_ == null) { - return cols_.size(); - } else { - return colsBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn getCols(int index) { - if (colsBuilder_ == null) { - return cols_.get(index); - } else { - return colsBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public Builder setCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.set(index, value); - onChanged(); - } else { - colsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public Builder setCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.set(index, builderForValue.build()); - onChanged(); - } else { - colsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public Builder addCols(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.add(value); - onChanged(); - } else { - colsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public Builder addCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.add(index, value); - onChanged(); - } else { - colsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public Builder addCols( - 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.add(builderForValue.build()); - onChanged(); - } else { - colsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public Builder addCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.add(index, builderForValue.build()); - onChanged(); - } else { - colsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public Builder addAllCols( - java.lang.Iterable values) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - super.addAll(values, cols_); - onChanged(); - } else { - colsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public Builder clearCols() { - if (colsBuilder_ == null) { - cols_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - colsBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public Builder removeCols(int index) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.remove(index); - onChanged(); - } else { - colsBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.Builder getColsBuilder( - int index) { - return getColsFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumnOrBuilder getColsOrBuilder( - int index) { - if (colsBuilder_ == null) { - return cols_.get(index); } else { - return colsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public java.util.List - getColsOrBuilderList() { - if (colsBuilder_ != null) { - return colsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(cols_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.Builder addColsBuilder() { - return getColsFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.getDefaultInstance()); - } - /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.Builder addColsBuilder( - int index) { - return getColsFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn cols = 2; - */ - public java.util.List - getColsBuilderList() { - return getColsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumnOrBuilder> - getColsFieldBuilder() { - if (colsBuilder_ == null) { - colsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumnOrBuilder>( - cols_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - cols_ = null; - } - return colsBuilder_; - } - - // optional bool enable_constraint = 3; - private boolean enableConstraint_ ; - /** - * optional bool enable_constraint = 3; - */ - public boolean hasEnableConstraint() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional bool enable_constraint = 3; - */ - public boolean getEnableConstraint() { - return enableConstraint_; - } - /** - * optional bool enable_constraint = 3; - */ - public Builder setEnableConstraint(boolean value) { - bitField0_ |= 0x00000004; - enableConstraint_ = value; - onChanged(); - return this; - } - /** - * optional bool enable_constraint = 3; - */ - public Builder clearEnableConstraint() { - bitField0_ = (bitField0_ & ~0x00000004); - enableConstraint_ = false; - onChanged(); - return this; - } - - // optional bool validate_constraint = 4; - private boolean validateConstraint_ ; - /** - * optional bool validate_constraint = 4; - */ - public boolean hasValidateConstraint() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bool validate_constraint = 4; - */ - public boolean getValidateConstraint() { - return validateConstraint_; - } - /** - * optional bool validate_constraint = 4; - */ - public Builder setValidateConstraint(boolean value) { - bitField0_ |= 0x00000008; - validateConstraint_ = value; - onChanged(); - return this; - } - /** - * optional bool validate_constraint = 4; - */ - public Builder clearValidateConstraint() { - bitField0_ = (bitField0_ & ~0x00000008); - validateConstraint_ = false; - onChanged(); - return this; - } - - // optional bool rely_constraint = 5; - private boolean relyConstraint_ ; - /** - * optional bool rely_constraint = 5; - */ - public boolean hasRelyConstraint() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool 
rely_constraint = 5; - */ - public boolean getRelyConstraint() { - return relyConstraint_; - } - /** - * optional bool rely_constraint = 5; - */ - public Builder setRelyConstraint(boolean value) { - bitField0_ |= 0x00000010; - relyConstraint_ = value; - onChanged(); - return this; - } - /** - * optional bool rely_constraint = 5; - */ - public Builder clearRelyConstraint() { - bitField0_ = (bitField0_ & ~0x00000010); - relyConstraint_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint) - } - - static { - defaultInstance = new UniqueConstraint(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint) - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - public static final int UKS_FIELD_NUMBER = 1; - private java.util.List uks_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public java.util.List getUksList() { - return uks_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public java.util.List - getUksOrBuilderList() { - return uks_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public int getUksCount() { - return uks_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint getUks(int index) { - return uks_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraintOrBuilder getUksOrBuilder( - int index) { - return uks_.get(index); - } - - private void initFields() { - uks_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getUksCount(); i++) { - if (!getUks(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < uks_.size(); i++) { - output.writeMessage(1, uks_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < uks_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, uks_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - 
return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.UniqueConstraints} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraintsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - 
internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getUksFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (uksBuilder_ == null) { - uks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - uksBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints(this); - int from_bitField0_ = bitField0_; - if (uksBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - uks_ = java.util.Collections.unmodifiableList(uks_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.uks_ = uks_; - } else { - result.uks_ = uksBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.getDefaultInstance()) return this; - if (uksBuilder_ == null) { - if (!other.uks_.isEmpty()) { - if (uks_.isEmpty()) { - uks_ = other.uks_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureUksIsMutable(); - uks_.addAll(other.uks_); - } - onChanged(); - } - } else { - if (!other.uks_.isEmpty()) { - if (uksBuilder_.isEmpty()) { - uksBuilder_.dispose(); - uksBuilder_ = 
null; - uks_ = other.uks_; - bitField0_ = (bitField0_ & ~0x00000001); - uksBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getUksFieldBuilder() : null; - } else { - uksBuilder_.addAllMessages(other.uks_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getUksCount(); i++) { - if (!getUks(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - private java.util.List uks_ = - java.util.Collections.emptyList(); - private void ensureUksIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - uks_ = new java.util.ArrayList(uks_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraintOrBuilder> uksBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public java.util.List getUksList() { - if (uksBuilder_ == null) { - return java.util.Collections.unmodifiableList(uks_); - } else { - return uksBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public int getUksCount() { - if (uksBuilder_ == null) { - return uks_.size(); - } else { - return uksBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint getUks(int index) { - if (uksBuilder_ == null) { - return uks_.get(index); - } else { - return uksBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public Builder setUks( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint value) { - if (uksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureUksIsMutable(); - uks_.set(index, value); - onChanged(); - } else { - uksBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public Builder setUks( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder builderForValue) { - if (uksBuilder_ == null) { - ensureUksIsMutable(); - 
uks_.set(index, builderForValue.build()); - onChanged(); - } else { - uksBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public Builder addUks(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint value) { - if (uksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureUksIsMutable(); - uks_.add(value); - onChanged(); - } else { - uksBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public Builder addUks( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint value) { - if (uksBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureUksIsMutable(); - uks_.add(index, value); - onChanged(); - } else { - uksBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public Builder addUks( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder builderForValue) { - if (uksBuilder_ == null) { - ensureUksIsMutable(); - uks_.add(builderForValue.build()); - onChanged(); - } else { - uksBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public Builder addUks( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder builderForValue) { - if (uksBuilder_ == null) { - ensureUksIsMutable(); - uks_.add(index, builderForValue.build()); - onChanged(); - } else { - uksBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public Builder addAllUks( - java.lang.Iterable values) { - if (uksBuilder_ == null) { - ensureUksIsMutable(); - super.addAll(values, uks_); - onChanged(); - } else { - uksBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public Builder clearUks() { - if (uksBuilder_ == null) { - uks_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - uksBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public Builder removeUks(int index) { - if (uksBuilder_ == null) { - ensureUksIsMutable(); - uks_.remove(index); - onChanged(); - } else { - uksBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder getUksBuilder( - int index) { - return getUksFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraintOrBuilder getUksOrBuilder( - int index) { - if (uksBuilder_ == null) { - return 
uks_.get(index); } else { - return uksBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public java.util.List - getUksOrBuilderList() { - if (uksBuilder_ != null) { - return uksBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(uks_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder addUksBuilder() { - return getUksFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder addUksBuilder( - int index) { - return getUksFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.UniqueConstraints.UniqueConstraint uks = 1; - */ - public java.util.List - getUksBuilderList() { - return getUksFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraintOrBuilder> - getUksFieldBuilder() { - if (uksBuilder_ == null) { - uksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.UniqueConstraints.UniqueConstraintOrBuilder>( - uks_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - uks_ = null; - } - return uksBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.UniqueConstraints) - } - - static { - defaultInstance = new UniqueConstraints(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.UniqueConstraints) - } - - public interface NotNullConstraintsOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - java.util.List - getNnsList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint getNns(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - int getNnsCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - java.util.List - getNnsOrBuilderList(); - /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraintOrBuilder getNnsOrBuilder( - int index); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.NotNullConstraints} - */ - public static final class NotNullConstraints extends - com.google.protobuf.GeneratedMessage - implements NotNullConstraintsOrBuilder { - // Use NotNullConstraints.newBuilder() to construct. - private NotNullConstraints(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private NotNullConstraints(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final NotNullConstraints defaultInstance; - public static NotNullConstraints getDefaultInstance() { - return defaultInstance; - } - - public NotNullConstraints getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private NotNullConstraints( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - nns_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - nns_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - nns_ = java.util.Collections.unmodifiableList(nns_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public NotNullConstraints parsePartialFrom( - 
com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new NotNullConstraints(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface NotNullConstraintOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string nn_name = 1; - /** - * required string nn_name = 1; - */ - boolean hasNnName(); - /** - * required string nn_name = 1; - */ - java.lang.String getNnName(); - /** - * required string nn_name = 1; - */ - com.google.protobuf.ByteString - getNnNameBytes(); - - // repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - java.util.List - getColsList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn getCols(int index); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - int getColsCount(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - java.util.List - getColsOrBuilderList(); - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumnOrBuilder getColsOrBuilder( - int index); - - // optional bool enable_constraint = 3; - /** - * optional bool enable_constraint = 3; - */ - boolean hasEnableConstraint(); - /** - * optional bool enable_constraint = 3; - */ - boolean getEnableConstraint(); - - // optional bool validate_constraint = 4; - /** - * optional bool validate_constraint = 4; - */ - boolean hasValidateConstraint(); - /** - * optional bool validate_constraint = 4; - */ - boolean getValidateConstraint(); - - // optional bool rely_constraint = 5; - /** - * optional bool rely_constraint = 5; - */ - boolean hasRelyConstraint(); - /** - * optional bool rely_constraint = 5; - */ - boolean getRelyConstraint(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint} - */ - public static final class NotNullConstraint extends - com.google.protobuf.GeneratedMessage - implements NotNullConstraintOrBuilder { - // Use NotNullConstraint.newBuilder() to construct. 
- private NotNullConstraint(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private NotNullConstraint(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final NotNullConstraint defaultInstance; - public static NotNullConstraint getDefaultInstance() { - return defaultInstance; - } - - public NotNullConstraint getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private NotNullConstraint( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - nnName_ = input.readBytes(); - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - cols_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - cols_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.PARSER, extensionRegistry)); - break; - } - case 24: { - bitField0_ |= 0x00000002; - enableConstraint_ = input.readBool(); - break; - } - case 32: { - bitField0_ |= 0x00000004; - validateConstraint_ = input.readBool(); - break; - } - case 40: { - bitField0_ |= 0x00000008; - relyConstraint_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - cols_ = java.util.Collections.unmodifiableList(cols_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public NotNullConstraint parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new NotNullConstraint(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public interface NotNullConstraintColumnOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string column_name = 1; - /** - * required string column_name = 1; - */ - boolean hasColumnName(); - /** - * required string column_name = 1; - */ - java.lang.String getColumnName(); - /** - * required string column_name = 1; - */ - com.google.protobuf.ByteString - getColumnNameBytes(); - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn} - */ - public static final class NotNullConstraintColumn extends - com.google.protobuf.GeneratedMessage - implements NotNullConstraintColumnOrBuilder { - // Use NotNullConstraintColumn.newBuilder() to construct. - private NotNullConstraintColumn(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private NotNullConstraintColumn(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final NotNullConstraintColumn defaultInstance; - public static NotNullConstraintColumn getDefaultInstance() { - return defaultInstance; - } - - public NotNullConstraintColumn getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private NotNullConstraintColumn( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - columnName_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_NotNullConstraintColumn_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_NotNullConstraintColumn_fieldAccessorTable - .ensureFieldAccessorsInitialized( - 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public NotNullConstraintColumn parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new NotNullConstraintColumn(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string column_name = 1; - public static final int COLUMN_NAME_FIELD_NUMBER = 1; - private java.lang.Object columnName_; - /** - * required string column_name = 1; - */ - public boolean hasColumnName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string column_name = 1; - */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - columnName_ = s; - } - return s; - } - } - /** - * required string column_name = 1; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - columnName_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasColumnName()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getColumnNameBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getColumnNameBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn parseFrom( - 
com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumnOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_NotNullConstraintColumn_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_NotNullConstraintColumn_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - columnName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_NotNullConstraintColumn_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.columnName_ = columnName_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.getDefaultInstance()) return this; - if (other.hasColumnName()) { - bitField0_ |= 0x00000001; - columnName_ = other.columnName_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasColumnName()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string column_name = 1; - private java.lang.Object columnName_ = ""; - /** - * required string column_name = 1; - */ - public boolean hasColumnName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string column_name = 1; - */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - columnName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string column_name = 1; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string column_name = 1; - */ - public Builder setColumnName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - columnName_ = value; - onChanged(); - return this; - } - /** - * required string column_name = 1; - */ - public Builder clearColumnName() { - bitField0_ = (bitField0_ & ~0x00000001); - columnName_ = getDefaultInstance().getColumnName(); - onChanged(); - return this; - } - /** - * required string column_name = 1; - */ - public Builder setColumnNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - columnName_ = value; - onChanged(); - return this; - } - - // 
@@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn) - } - - static { - defaultInstance = new NotNullConstraintColumn(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn) - } - - private int bitField0_; - // required string nn_name = 1; - public static final int NN_NAME_FIELD_NUMBER = 1; - private java.lang.Object nnName_; - /** - * required string nn_name = 1; - */ - public boolean hasNnName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string nn_name = 1; - */ - public java.lang.String getNnName() { - java.lang.Object ref = nnName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - nnName_ = s; - } - return s; - } - } - /** - * required string nn_name = 1; - */ - public com.google.protobuf.ByteString - getNnNameBytes() { - java.lang.Object ref = nnName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - nnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - public static final int COLS_FIELD_NUMBER = 2; - private java.util.List cols_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public java.util.List getColsList() { - return cols_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public java.util.List - getColsOrBuilderList() { - return cols_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public int getColsCount() { - return cols_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn getCols(int index) { - return cols_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumnOrBuilder getColsOrBuilder( - int index) { - return cols_.get(index); - } - - // optional bool enable_constraint = 3; - public static final int ENABLE_CONSTRAINT_FIELD_NUMBER = 3; - private boolean enableConstraint_; - /** - * optional bool enable_constraint = 3; - */ - public boolean hasEnableConstraint() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bool enable_constraint = 3; - */ - public boolean getEnableConstraint() { - return enableConstraint_; - } - - // optional bool validate_constraint = 4; - public static final int VALIDATE_CONSTRAINT_FIELD_NUMBER = 4; - private boolean validateConstraint_; - /** - * optional bool validate_constraint = 4; - */ - public boolean 
hasValidateConstraint() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional bool validate_constraint = 4; - */ - public boolean getValidateConstraint() { - return validateConstraint_; - } - - // optional bool rely_constraint = 5; - public static final int RELY_CONSTRAINT_FIELD_NUMBER = 5; - private boolean relyConstraint_; - /** - * optional bool rely_constraint = 5; - */ - public boolean hasRelyConstraint() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bool rely_constraint = 5; - */ - public boolean getRelyConstraint() { - return relyConstraint_; - } - - private void initFields() { - nnName_ = ""; - cols_ = java.util.Collections.emptyList(); - enableConstraint_ = false; - validateConstraint_ = false; - relyConstraint_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasNnName()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getColsCount(); i++) { - if (!getCols(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getNnNameBytes()); - } - for (int i = 0; i < cols_.size(); i++) { - output.writeMessage(2, cols_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(3, enableConstraint_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(4, validateConstraint_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBool(5, relyConstraint_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getNnNameBytes()); - } - for (int i = 0; i < cols_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, cols_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(3, enableConstraint_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(4, validateConstraint_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, relyConstraint_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraintOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - 
internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getColsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - nnName_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - if (colsBuilder_ == null) { - cols_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - colsBuilder_.clear(); - } - enableConstraint_ = false; - bitField0_ = (bitField0_ & ~0x00000004); - validateConstraint_ = false; - bitField0_ = (bitField0_ & ~0x00000008); - relyConstraint_ = false; - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint build() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.nnName_ = nnName_; - if (colsBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - cols_ = java.util.Collections.unmodifiableList(cols_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.cols_ = cols_; - } else { - result.cols_ = colsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000002; - } - result.enableConstraint_ = enableConstraint_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000004; - } - result.validateConstraint_ = validateConstraint_; - if 
(((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000008; - } - result.relyConstraint_ = relyConstraint_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.getDefaultInstance()) return this; - if (other.hasNnName()) { - bitField0_ |= 0x00000001; - nnName_ = other.nnName_; - onChanged(); - } - if (colsBuilder_ == null) { - if (!other.cols_.isEmpty()) { - if (cols_.isEmpty()) { - cols_ = other.cols_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureColsIsMutable(); - cols_.addAll(other.cols_); - } - onChanged(); - } - } else { - if (!other.cols_.isEmpty()) { - if (colsBuilder_.isEmpty()) { - colsBuilder_.dispose(); - colsBuilder_ = null; - cols_ = other.cols_; - bitField0_ = (bitField0_ & ~0x00000002); - colsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getColsFieldBuilder() : null; - } else { - colsBuilder_.addAllMessages(other.cols_); - } - } - } - if (other.hasEnableConstraint()) { - setEnableConstraint(other.getEnableConstraint()); - } - if (other.hasValidateConstraint()) { - setValidateConstraint(other.getValidateConstraint()); - } - if (other.hasRelyConstraint()) { - setRelyConstraint(other.getRelyConstraint()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasNnName()) { - - return false; - } - for (int i = 0; i < getColsCount(); i++) { - if (!getCols(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string nn_name = 1; - private java.lang.Object nnName_ = ""; - /** - * required string nn_name = 1; - */ - public boolean hasNnName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string nn_name = 1; - */ - public java.lang.String getNnName() { - java.lang.Object ref = nnName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - nnName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string nn_name = 1; - */ - public com.google.protobuf.ByteString - getNnNameBytes() { - java.lang.Object ref = nnName_; - if (ref instanceof String) { - 
com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - nnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string nn_name = 1; - */ - public Builder setNnName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - nnName_ = value; - onChanged(); - return this; - } - /** - * required string nn_name = 1; - */ - public Builder clearNnName() { - bitField0_ = (bitField0_ & ~0x00000001); - nnName_ = getDefaultInstance().getNnName(); - onChanged(); - return this; - } - /** - * required string nn_name = 1; - */ - public Builder setNnNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - nnName_ = value; - onChanged(); - return this; - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - private java.util.List cols_ = - java.util.Collections.emptyList(); - private void ensureColsIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - cols_ = new java.util.ArrayList(cols_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumnOrBuilder> colsBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public java.util.List getColsList() { - if (colsBuilder_ == null) { - return java.util.Collections.unmodifiableList(cols_); - } else { - return colsBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public int getColsCount() { - if (colsBuilder_ == null) { - return cols_.size(); - } else { - return colsBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn getCols(int index) { - if (colsBuilder_ == null) { - return cols_.get(index); - } else { - return colsBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public Builder setCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.set(index, value); - onChanged(); - } else { - colsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public Builder setCols( - int index, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.set(index, builderForValue.build()); - onChanged(); - } else { - colsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public Builder addCols(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.add(value); - onChanged(); - } else { - colsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public Builder addCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn value) { - if (colsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureColsIsMutable(); - cols_.add(index, value); - onChanged(); - } else { - colsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public Builder addCols( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.add(builderForValue.build()); - onChanged(); - } else { - colsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public Builder addCols( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.Builder builderForValue) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.add(index, builderForValue.build()); - onChanged(); - } else { - colsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public Builder addAllCols( - java.lang.Iterable values) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - super.addAll(values, cols_); - onChanged(); - } else { - colsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public Builder clearCols() { - if (colsBuilder_ == null) { - cols_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - colsBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public Builder removeCols(int index) { - if (colsBuilder_ == null) { - ensureColsIsMutable(); - cols_.remove(index); - onChanged(); - } else { - colsBuilder_.remove(index); - } - return this; - } - /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.Builder getColsBuilder( - int index) { - return getColsFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumnOrBuilder getColsOrBuilder( - int index) { - if (colsBuilder_ == null) { - return cols_.get(index); } else { - return colsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public java.util.List - getColsOrBuilderList() { - if (colsBuilder_ != null) { - return colsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(cols_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.Builder addColsBuilder() { - return getColsFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.Builder addColsBuilder( - int index) { - return getColsFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn cols = 2; - */ - public java.util.List - getColsBuilderList() { - return getColsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumnOrBuilder> - getColsFieldBuilder() { - if (colsBuilder_ == null) { - colsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumnOrBuilder>( - cols_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - cols_ = null; - } - return colsBuilder_; - } - - // optional bool enable_constraint = 3; - private boolean enableConstraint_ ; - /** - * optional bool enable_constraint = 3; - */ - public 
boolean hasEnableConstraint() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional bool enable_constraint = 3; - */ - public boolean getEnableConstraint() { - return enableConstraint_; - } - /** - * optional bool enable_constraint = 3; - */ - public Builder setEnableConstraint(boolean value) { - bitField0_ |= 0x00000004; - enableConstraint_ = value; - onChanged(); - return this; - } - /** - * optional bool enable_constraint = 3; - */ - public Builder clearEnableConstraint() { - bitField0_ = (bitField0_ & ~0x00000004); - enableConstraint_ = false; - onChanged(); - return this; - } - - // optional bool validate_constraint = 4; - private boolean validateConstraint_ ; - /** - * optional bool validate_constraint = 4; - */ - public boolean hasValidateConstraint() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bool validate_constraint = 4; - */ - public boolean getValidateConstraint() { - return validateConstraint_; - } - /** - * optional bool validate_constraint = 4; - */ - public Builder setValidateConstraint(boolean value) { - bitField0_ |= 0x00000008; - validateConstraint_ = value; - onChanged(); - return this; - } - /** - * optional bool validate_constraint = 4; - */ - public Builder clearValidateConstraint() { - bitField0_ = (bitField0_ & ~0x00000008); - validateConstraint_ = false; - onChanged(); - return this; - } - - // optional bool rely_constraint = 5; - private boolean relyConstraint_ ; - /** - * optional bool rely_constraint = 5; - */ - public boolean hasRelyConstraint() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool rely_constraint = 5; - */ - public boolean getRelyConstraint() { - return relyConstraint_; - } - /** - * optional bool rely_constraint = 5; - */ - public Builder setRelyConstraint(boolean value) { - bitField0_ |= 0x00000010; - relyConstraint_ = value; - onChanged(); - return this; - } - /** - * optional bool rely_constraint = 5; - */ - public Builder clearRelyConstraint() { - bitField0_ = (bitField0_ & ~0x00000010); - relyConstraint_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint) - } - - static { - defaultInstance = new NotNullConstraint(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint) - } - - // repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - public static final int NNS_FIELD_NUMBER = 1; - private java.util.List nns_; - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public java.util.List getNnsList() { - return nns_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public java.util.List - getNnsOrBuilderList() { - return nns_; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public int getNnsCount() { - return nns_.size(); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint getNns(int index) { - return nns_.get(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraintOrBuilder getNnsOrBuilder( - int index) { - return nns_.get(index); - } - - private void initFields() { - nns_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getNnsCount(); i++) { - if (!getNns(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < nns_.size(); i++) { - output.writeMessage(1, nns_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < nns_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, nns_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - 
public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.NotNullConstraints} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraintsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.Builder.class); - } - - // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getNnsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (nnsBuilder_ == null) { - nns_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - nnsBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_descriptor; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints getDefaultInstanceForType() { - return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.getDefaultInstance(); - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints build() { - 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints buildPartial() { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints(this); - int from_bitField0_ = bitField0_; - if (nnsBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - nns_ = java.util.Collections.unmodifiableList(nns_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.nns_ = nns_; - } else { - result.nns_ = nnsBuilder_.build(); - } - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints) { - return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints other) { - if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.getDefaultInstance()) return this; - if (nnsBuilder_ == null) { - if (!other.nns_.isEmpty()) { - if (nns_.isEmpty()) { - nns_ = other.nns_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureNnsIsMutable(); - nns_.addAll(other.nns_); - } - onChanged(); - } - } else { - if (!other.nns_.isEmpty()) { - if (nnsBuilder_.isEmpty()) { - nnsBuilder_.dispose(); - nnsBuilder_ = null; - nns_ = other.nns_; - bitField0_ = (bitField0_ & ~0x00000001); - nnsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getNnsFieldBuilder() : null; - } else { - nnsBuilder_.addAllMessages(other.nns_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getNnsCount(); i++) { - if (!getNns(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - private java.util.List nns_ = - java.util.Collections.emptyList(); - private void ensureNnsIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - nns_ = new java.util.ArrayList(nns_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraintOrBuilder> nnsBuilder_; - - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public java.util.List getNnsList() { - if (nnsBuilder_ == null) { - return java.util.Collections.unmodifiableList(nns_); - } else { - return nnsBuilder_.getMessageList(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public int getNnsCount() { - if (nnsBuilder_ == null) { - return nns_.size(); - } else { - return nnsBuilder_.getCount(); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint getNns(int index) { - if (nnsBuilder_ == null) { - return nns_.get(index); - } else { - return nnsBuilder_.getMessage(index); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public Builder setNns( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint value) { - if (nnsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureNnsIsMutable(); - nns_.set(index, value); - onChanged(); - } else { - nnsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public Builder setNns( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder builderForValue) { - if (nnsBuilder_ == null) { - ensureNnsIsMutable(); - nns_.set(index, builderForValue.build()); - onChanged(); - } else { - nnsBuilder_.setMessage(index, builderForValue.build()); - } - 
return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public Builder addNns(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint value) { - if (nnsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureNnsIsMutable(); - nns_.add(value); - onChanged(); - } else { - nnsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public Builder addNns( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint value) { - if (nnsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureNnsIsMutable(); - nns_.add(index, value); - onChanged(); - } else { - nnsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public Builder addNns( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder builderForValue) { - if (nnsBuilder_ == null) { - ensureNnsIsMutable(); - nns_.add(builderForValue.build()); - onChanged(); - } else { - nnsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public Builder addNns( - int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder builderForValue) { - if (nnsBuilder_ == null) { - ensureNnsIsMutable(); - nns_.add(index, builderForValue.build()); - onChanged(); - } else { - nnsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public Builder addAllNns( - java.lang.Iterable values) { - if (nnsBuilder_ == null) { - ensureNnsIsMutable(); - super.addAll(values, nns_); - onChanged(); - } else { - nnsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public Builder clearNns() { - if (nnsBuilder_ == null) { - nns_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - nnsBuilder_.clear(); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public Builder removeNns(int index) { - if (nnsBuilder_ == null) { - ensureNnsIsMutable(); - nns_.remove(index); - onChanged(); - } else { - nnsBuilder_.remove(index); - } - return this; - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder getNnsBuilder( - int index) { - return getNnsFieldBuilder().getBuilder(index); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraintOrBuilder getNnsOrBuilder( - int index) { - if (nnsBuilder_ == null) { - return nns_.get(index); } else { - return nnsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated 
.org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public java.util.List - getNnsOrBuilderList() { - if (nnsBuilder_ != null) { - return nnsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(nns_); - } - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder addNnsBuilder() { - return getNnsFieldBuilder().addBuilder( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder addNnsBuilder( - int index) { - return getNnsFieldBuilder().addBuilder( - index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.getDefaultInstance()); - } - /** - * repeated .org.apache.hadoop.hive.metastore.hbase.NotNullConstraints.NotNullConstraint nns = 1; - */ - public java.util.List - getNnsBuilderList() { - return getNnsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraintOrBuilder> - getNnsFieldBuilder() { - if (nnsBuilder_ == null) { - nnsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.NotNullConstraints.NotNullConstraintOrBuilder>( - nns_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - nns_ = null; - } - return nnsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.NotNullConstraints) - } - - static { - defaultInstance = new NotNullConstraints(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.NotNullConstraints) - } - - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable; - 
private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - 
internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_Function_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable; - private static 
com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - 
internal_static_org_apache_hadoop_hive_metastore_hbase_Index_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_PrimaryKeyColumn_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_PrimaryKeyColumn_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_ForeignKeyColumn_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_ForeignKeyColumn_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_descriptor; - private 
static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_UniqueConstraintColumn_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_UniqueConstraintColumn_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_NotNullConstraintColumn_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_NotNullConstraintColumn_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\033hbase_metastore_proto.proto\022&org.apach" + - "e.hadoop.hive.metastore.hbase\"h\n\tAggrSta" + - "ts\022\023\n\013parts_found\030\001 \002(\003\022F\n\tcol_stats\030\002 \003" + - "(\01323.org.apache.hadoop.hive.metastore.hb" + - "ase.ColumnStats\"\364\001\n\024AggrStatsBloomFilter" + - "\022\017\n\007db_name\030\001 \002(\014\022\022\n\ntable_name\030\002 \002(\014\022^\n" + - "\014bloom_filter\030\003 \002(\0132H.org.apache.hadoop." + - "hive.metastore.hbase.AggrStatsBloomFilte" + - "r.BloomFilter\022\025\n\raggregated_at\030\004 \002(\003\032@\n\013" + - "BloomFilter\022\020\n\010num_bits\030\001 \002(\005\022\021\n\tnum_fun", - "cs\030\002 \002(\005\022\014\n\004bits\030\003 \003(\003\"\357\001\n\032AggrStatsInva" + - "lidatorFilter\022_\n\rto_invalidate\030\001 \003(\0132H.o" + - "rg.apache.hadoop.hive.metastore.hbase.Ag" + - "grStatsInvalidatorFilter.Entry\022\021\n\trun_ev" + - "ery\030\002 \002(\003\022\034\n\024max_cache_entry_life\030\003 \002(\003\032" + - "?\n\005Entry\022\017\n\007db_name\030\001 \002(\014\022\022\n\ntable_name\030" + - "\002 \002(\014\022\021\n\tpart_name\030\003 \002(\014\"\362\010\n\013ColumnStats" + - "\022\025\n\rlast_analyzed\030\001 \001(\003\022\023\n\013column_type\030\002" + - " \002(\t\022\021\n\tnum_nulls\030\003 \001(\003\022\033\n\023num_distinct_" + - "values\030\004 \001(\003\022T\n\nbool_stats\030\005 \001(\0132@.org.a", - "pache.hadoop.hive.metastore.hbase.Column" + - "Stats.BooleanStats\022Q\n\nlong_stats\030\006 \001(\0132=" + - ".org.apache.hadoop.hive.metastore.hbase." 
+ - "ColumnStats.LongStats\022U\n\014double_stats\030\007 " + - "\001(\0132?.org.apache.hadoop.hive.metastore.h" + - "base.ColumnStats.DoubleStats\022U\n\014string_s" + - "tats\030\010 \001(\0132?.org.apache.hadoop.hive.meta" + - "store.hbase.ColumnStats.StringStats\022U\n\014b" + - "inary_stats\030\t \001(\0132?.org.apache.hadoop.hi" + - "ve.metastore.hbase.ColumnStats.StringSta", - "ts\022W\n\rdecimal_stats\030\n \001(\0132@.org.apache.h" + - "adoop.hive.metastore.hbase.ColumnStats.D" + - "ecimalStats\022\023\n\013column_name\030\013 \001(\t\022\023\n\013bit_" + - "vectors\030\014 \001(\t\0325\n\014BooleanStats\022\021\n\tnum_tru" + - "es\030\001 \001(\003\022\022\n\nnum_falses\030\002 \001(\003\0322\n\tLongStat" + - "s\022\021\n\tlow_value\030\001 \001(\022\022\022\n\nhigh_value\030\002 \001(\022" + - "\0324\n\013DoubleStats\022\021\n\tlow_value\030\001 \001(\001\022\022\n\nhi" + - "gh_value\030\002 \001(\001\032=\n\013StringStats\022\026\n\016max_col" + - "_length\030\001 \001(\003\022\026\n\016avg_col_length\030\002 \001(\001\032\365\001" + - "\n\014DecimalStats\022[\n\tlow_value\030\001 \001(\0132H.org.", - "apache.hadoop.hive.metastore.hbase.Colum" + - "nStats.DecimalStats.Decimal\022\\\n\nhigh_valu" + - "e\030\002 \001(\0132H.org.apache.hadoop.hive.metasto" + - "re.hbase.ColumnStats.DecimalStats.Decima" + - "l\032*\n\007Decimal\022\020\n\010unscaled\030\001 \002(\014\022\r\n\005scale\030" + - "\002 \002(\005\"\246\002\n\010Database\022\023\n\013description\030\001 \001(\t\022" + - "\013\n\003uri\030\002 \001(\t\022F\n\nparameters\030\003 \001(\01322.org.a" + - "pache.hadoop.hive.metastore.hbase.Parame" + - "ters\022Q\n\nprivileges\030\004 \001(\0132=.org.apache.ha" + - "doop.hive.metastore.hbase.PrincipalPrivi", - "legeSet\022\022\n\nowner_name\030\005 \001(\t\022I\n\nowner_typ" + - "e\030\006 \001(\01625.org.apache.hadoop.hive.metasto" + - "re.hbase.PrincipalType\"$\n\017DelegationToke" + - "n\022\021\n\ttoken_str\030\001 \002(\t\":\n\013FieldSchema\022\014\n\004n" + - "ame\030\001 \002(\t\022\014\n\004type\030\002 \002(\t\022\017\n\007comment\030\003 \001(\t" + - "\"\206\004\n\010Function\022\022\n\nclass_name\030\001 \001(\t\022\022\n\nown" + - "er_name\030\002 \001(\t\022I\n\nowner_type\030\003 \001(\01625.org." + - "apache.hadoop.hive.metastore.hbase.Princ" + - "ipalType\022\023\n\013create_time\030\004 \001(\022\022T\n\rfunctio" + - "n_type\030\005 \001(\0162=.org.apache.hadoop.hive.me", - "tastore.hbase.Function.FunctionType\022S\n\rr" + - "esource_uris\030\006 \003(\0132<.org.apache.hadoop.h" + - "ive.metastore.hbase.Function.ResourceUri" + - "\032\254\001\n\013ResourceUri\022`\n\rresource_type\030\001 \002(\0162" + - "I.org.apache.hadoop.hive.metastore.hbase" + - ".Function.ResourceUri.ResourceType\022\013\n\003ur" + - "i\030\002 \002(\t\".\n\014ResourceType\022\007\n\003JAR\020\001\022\010\n\004FILE" + - "\020\002\022\013\n\007ARCHIVE\020\003\"\030\n\014FunctionType\022\010\n\004JAVA\020" + - "\001\"\037\n\tMasterKey\022\022\n\nmaster_key\030\001 \002(\t\",\n\016Pa" + - "rameterEntry\022\013\n\003key\030\001 \002(\t\022\r\n\005value\030\002 \002(\t", - "\"W\n\nParameters\022I\n\tparameter\030\001 \003(\01326.org." 
+ - "apache.hadoop.hive.metastore.hbase.Param" + - "eterEntry\"\360\001\n\tPartition\022\023\n\013create_time\030\001" + - " \001(\003\022\030\n\020last_access_time\030\002 \001(\003\022\020\n\010locati" + - "on\030\003 \001(\t\022I\n\rsd_parameters\030\004 \001(\01322.org.ap" + - "ache.hadoop.hive.metastore.hbase.Paramet" + - "ers\022\017\n\007sd_hash\030\005 \002(\014\022F\n\nparameters\030\006 \001(\013" + - "22.org.apache.hadoop.hive.metastore.hbas" + - "e.Parameters\"\204\001\n\032PrincipalPrivilegeSetEn" + - "try\022\026\n\016principal_name\030\001 \002(\t\022N\n\nprivilege", - "s\030\002 \003(\0132:.org.apache.hadoop.hive.metasto" + - "re.hbase.PrivilegeGrantInfo\"\275\001\n\025Principa" + - "lPrivilegeSet\022Q\n\005users\030\001 \003(\0132B.org.apach" + - "e.hadoop.hive.metastore.hbase.PrincipalP" + - "rivilegeSetEntry\022Q\n\005roles\030\002 \003(\0132B.org.ap" + - "ache.hadoop.hive.metastore.hbase.Princip" + - "alPrivilegeSetEntry\"\260\001\n\022PrivilegeGrantIn" + - "fo\022\021\n\tprivilege\030\001 \001(\t\022\023\n\013create_time\030\002 \001" + - "(\003\022\017\n\007grantor\030\003 \001(\t\022K\n\014grantor_type\030\004 \001(" + - "\01625.org.apache.hadoop.hive.metastore.hba", - "se.PrincipalType\022\024\n\014grant_option\030\005 \001(\010\"\374" + - "\001\n\rRoleGrantInfo\022\026\n\016principal_name\030\001 \002(\t" + - "\022M\n\016principal_type\030\002 \002(\01625.org.apache.ha" + - "doop.hive.metastore.hbase.PrincipalType\022" + - "\020\n\010add_time\030\003 \001(\003\022\017\n\007grantor\030\004 \001(\t\022K\n\014gr" + - "antor_type\030\005 \001(\01625.org.apache.hadoop.hiv" + - "e.metastore.hbase.PrincipalType\022\024\n\014grant" + - "_option\030\006 \001(\010\"^\n\021RoleGrantInfoList\022I\n\ngr" + - "ant_info\030\001 \003(\01325.org.apache.hadoop.hive." + - "metastore.hbase.RoleGrantInfo\"\030\n\010RoleLis", - "t\022\014\n\004role\030\001 \003(\t\"/\n\004Role\022\023\n\013create_time\030\001" + - " \001(\003\022\022\n\nowner_name\030\002 \001(\t\"\254\010\n\021StorageDesc" + - "riptor\022A\n\004cols\030\001 \003(\01323.org.apache.hadoop" + - ".hive.metastore.hbase.FieldSchema\022\024\n\014inp" + - "ut_format\030\002 \001(\t\022\025\n\routput_format\030\003 \001(\t\022\025" + - "\n\ris_compressed\030\004 \001(\010\022\023\n\013num_buckets\030\005 \001" + - "(\021\022W\n\nserde_info\030\006 \001(\0132C.org.apache.hado" + - "op.hive.metastore.hbase.StorageDescripto" + - "r.SerDeInfo\022\023\n\013bucket_cols\030\007 \003(\t\022R\n\tsort" + - "_cols\030\010 \003(\0132?.org.apache.hadoop.hive.met", - "astore.hbase.StorageDescriptor.Order\022Y\n\013" + - "skewed_info\030\t \001(\0132D.org.apache.hadoop.hi" + - "ve.metastore.hbase.StorageDescriptor.Ske" + - "wedInfo\022!\n\031stored_as_sub_directories\030\n \001" + - "(\010\032.\n\005Order\022\023\n\013column_name\030\001 \002(\t\022\020\n\005orde" + - "r\030\002 \001(\021:\0011\032|\n\tSerDeInfo\022\014\n\004name\030\001 \001(\t\022\031\n" + - "\021serialization_lib\030\002 \001(\t\022F\n\nparameters\030\003" + - " \001(\01322.org.apache.hadoop.hive.metastore." + - "hbase.Parameters\032\214\003\n\nSkewedInfo\022\030\n\020skewe" + - "d_col_names\030\001 \003(\t\022r\n\021skewed_col_values\030\002", - " \003(\0132W.org.apache.hadoop.hive.metastore." + - "hbase.StorageDescriptor.SkewedInfo.Skewe" + - "dColValueList\022\206\001\n\036skewed_col_value_locat" + - "ion_maps\030\003 \003(\0132^.org.apache.hadoop.hive." 
+ - "metastore.hbase.StorageDescriptor.Skewed" + - "Info.SkewedColValueLocationMap\032.\n\022Skewed" + - "ColValueList\022\030\n\020skewed_col_value\030\001 \003(\t\0327" + - "\n\031SkewedColValueLocationMap\022\013\n\003key\030\001 \003(\t" + - "\022\r\n\005value\030\002 \002(\t\"\254\004\n\005Table\022\r\n\005owner\030\001 \001(\t" + - "\022\023\n\013create_time\030\002 \001(\003\022\030\n\020last_access_tim", - "e\030\003 \001(\003\022\021\n\tretention\030\004 \001(\003\022\020\n\010location\030\005" + - " \001(\t\022I\n\rsd_parameters\030\006 \001(\01322.org.apache" + - ".hadoop.hive.metastore.hbase.Parameters\022" + - "\017\n\007sd_hash\030\007 \002(\014\022K\n\016partition_keys\030\010 \003(\013" + - "23.org.apache.hadoop.hive.metastore.hbas" + - "e.FieldSchema\022F\n\nparameters\030\t \001(\01322.org." + - "apache.hadoop.hive.metastore.hbase.Param" + - "eters\022\032\n\022view_original_text\030\n \001(\t\022\032\n\022vie" + - "w_expanded_text\030\013 \001(\t\022\022\n\ntable_type\030\014 \001(" + - "\t\022Q\n\nprivileges\030\r \001(\0132=.org.apache.hadoo", - "p.hive.metastore.hbase.PrincipalPrivileg" + - "eSet\022\024\n\014is_temporary\030\016 \001(\010\022\032\n\022is_rewrite" + - "_enabled\030\017 \001(\010\"\334\002\n\005Index\022\031\n\021indexHandler" + - "Class\030\001 \001(\t\022\016\n\006dbName\030\002 \002(\t\022\025\n\rorigTable" + - "Name\030\003 \002(\t\022\020\n\010location\030\004 \001(\t\022I\n\rsd_param" + - "eters\030\005 \001(\01322.org.apache.hadoop.hive.met" + - "astore.hbase.Parameters\022\022\n\ncreateTime\030\006 " + - "\001(\005\022\026\n\016lastAccessTime\030\007 \001(\005\022\026\n\016indexTabl" + - "eName\030\010 \001(\t\022\017\n\007sd_hash\030\t \001(\014\022F\n\nparamete" + - "rs\030\n \001(\01322.org.apache.hadoop.hive.metast", - "ore.hbase.Parameters\022\027\n\017deferredRebuild\030" + - "\013 \001(\010\"\353\004\n\026PartitionKeyComparator\022\r\n\005name" + - "s\030\001 \002(\t\022\r\n\005types\030\002 \002(\t\022S\n\002op\030\003 \003(\0132G.org" + - ".apache.hadoop.hive.metastore.hbase.Part" + - "itionKeyComparator.Operator\022S\n\005range\030\004 \003" + - "(\0132D.org.apache.hadoop.hive.metastore.hb" + - "ase.PartitionKeyComparator.Range\032(\n\004Mark" + - "\022\r\n\005value\030\001 \002(\t\022\021\n\tinclusive\030\002 \002(\010\032\272\001\n\005R" + - "ange\022\013\n\003key\030\001 \002(\t\022R\n\005start\030\002 \001(\0132C.org.a" + - "pache.hadoop.hive.metastore.hbase.Partit", - "ionKeyComparator.Mark\022P\n\003end\030\003 \001(\0132C.org" + - ".apache.hadoop.hive.metastore.hbase.Part" + - "itionKeyComparator.Mark\032\241\001\n\010Operator\022Z\n\004" + - "type\030\001 \002(\0162L.org.apache.hadoop.hive.meta" + - "store.hbase.PartitionKeyComparator.Opera" + - "tor.Type\022\013\n\003key\030\002 \002(\t\022\013\n\003val\030\003 \002(\t\"\037\n\004Ty" + - "pe\022\010\n\004LIKE\020\000\022\r\n\tNOTEQUALS\020\001\"\373\001\n\nPrimaryK" + - "ey\022\017\n\007pk_name\030\001 \002(\t\022Q\n\004cols\030\002 \003(\0132C.org." 
+ - "apache.hadoop.hive.metastore.hbase.Prima" + - "ryKey.PrimaryKeyColumn\022\031\n\021enable_constra", - "int\030\003 \001(\010\022\033\n\023validate_constraint\030\004 \001(\010\022\027" + - "\n\017rely_constraint\030\005 \001(\010\0328\n\020PrimaryKeyCol" + - "umn\022\023\n\013column_name\030\001 \002(\t\022\017\n\007key_seq\030\002 \002(" + - "\021\"\205\004\n\013ForeignKeys\022K\n\003fks\030\001 \003(\0132>.org.apa" + - "che.hadoop.hive.metastore.hbase.ForeignK" + - "eys.ForeignKey\032\250\003\n\nForeignKey\022\017\n\007fk_name" + - "\030\001 \002(\t\022\032\n\022referenced_db_name\030\002 \002(\t\022\035\n\025re" + - "ferenced_table_name\030\003 \002(\t\022\032\n\022referenced_" + - "pk_name\030\004 \001(\t\022\023\n\013update_rule\030\005 \001(\005\022\023\n\013de" + - "lete_rule\030\006 \001(\005\022]\n\004cols\030\007 \003(\0132O.org.apac", - "he.hadoop.hive.metastore.hbase.ForeignKe" + - "ys.ForeignKey.ForeignKeyColumn\022\031\n\021enable" + - "_constraint\030\010 \001(\010\022\033\n\023validate_constraint" + - "\030\t \001(\010\022\027\n\017rely_constraint\030\n \001(\010\032X\n\020Forei" + - "gnKeyColumn\022\023\n\013column_name\030\001 \002(\t\022\036\n\026refe" + - "renced_column_name\030\002 \002(\t\022\017\n\007key_seq\030\003 \002(" + - "\021\"\224\003\n\021UniqueConstraints\022W\n\003uks\030\001 \003(\0132J.o" + - "rg.apache.hadoop.hive.metastore.hbase.Un" + - "iqueConstraints.UniqueConstraint\032\245\002\n\020Uni" + - "queConstraint\022\017\n\007uk_name\030\001 \002(\t\022o\n\004cols\030\002", - " \003(\0132a.org.apache.hadoop.hive.metastore." + - "hbase.UniqueConstraints.UniqueConstraint" + - ".UniqueConstraintColumn\022\031\n\021enable_constr" + - "aint\030\003 \001(\010\022\033\n\023validate_constraint\030\004 \001(\010\022" + - "\027\n\017rely_constraint\030\005 \001(\010\032>\n\026UniqueConstr" + - "aintColumn\022\023\n\013column_name\030\001 \002(\t\022\017\n\007key_s" + - "eq\030\002 \002(\021\"\213\003\n\022NotNullConstraints\022Y\n\003nns\030\001" + - " \003(\0132L.org.apache.hadoop.hive.metastore." + - "hbase.NotNullConstraints.NotNullConstrai" + - "nt\032\231\002\n\021NotNullConstraint\022\017\n\007nn_name\030\001 \002(", - "\t\022r\n\004cols\030\002 \003(\0132d.org.apache.hadoop.hive" + - ".metastore.hbase.NotNullConstraints.NotN" + - "ullConstraint.NotNullConstraintColumn\022\031\n" + - "\021enable_constraint\030\003 \001(\010\022\033\n\023validate_con" + - "straint\030\004 \001(\010\022\027\n\017rely_constraint\030\005 \001(\010\032." 
+ - "\n\027NotNullConstraintColumn\022\023\n\013column_name" + - "\030\001 \002(\t*#\n\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004ROL" + - "E\020\001" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor, - new java.lang.String[] { "PartsFound", "ColStats", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor, - new java.lang.String[] { "DbName", "TableName", "BloomFilter", "AggregatedAt", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor, - new java.lang.String[] { "NumBits", "NumFuncs", "Bits", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor, - new java.lang.String[] { "ToInvalidate", "RunEvery", "MaxCacheEntryLife", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor, - new java.lang.String[] { "DbName", "TableName", "PartName", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor, - new java.lang.String[] { "LastAnalyzed", "ColumnType", "NumNulls", "NumDistinctValues", "BoolStats", "LongStats", "DoubleStats", "StringStats", "BinaryStats", "DecimalStats", "ColumnName", "BitVectors", }); - 
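The descriptor string assembled above is what keeps the removed schema introspectable at runtime; a minimal sketch of reading it back, using only the getDescriptor() accessor generated in this file and the standard com.google.protobuf.Descriptors API:

    // Illustrative only: list the fields of the ColumnStats message from the embedded descriptor.
    import com.google.protobuf.Descriptors.Descriptor;
    import com.google.protobuf.Descriptors.FieldDescriptor;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

    public class DescriptorDump {
      public static void main(String[] args) {
        Descriptor columnStats =
            HbaseMetastoreProto.getDescriptor().findMessageTypeByName("ColumnStats");
        for (FieldDescriptor f : columnStats.getFields()) {
          // e.g. "2: column_type (STRING)" for the required column_type field
          System.out.println(f.getNumber() + ": " + f.getName() + " (" + f.getType() + ")");
        }
      }
    }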
internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor, - new java.lang.String[] { "NumTrues", "NumFalses", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(1); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor, - new java.lang.String[] { "LowValue", "HighValue", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(2); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor, - new java.lang.String[] { "LowValue", "HighValue", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(3); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor, - new java.lang.String[] { "MaxColLength", "AvgColLength", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(4); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor, - new java.lang.String[] { "LowValue", "HighValue", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor, - new java.lang.String[] { "Unscaled", "Scale", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor = - getDescriptor().getMessageTypes().get(4); - internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor, - new java.lang.String[] { "Description", "Uri", "Parameters", "Privileges", 
"OwnerName", "OwnerType", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_descriptor = - getDescriptor().getMessageTypes().get(5); - internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_descriptor, - new java.lang.String[] { "TokenStr", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor = - getDescriptor().getMessageTypes().get(6); - internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor, - new java.lang.String[] { "Name", "Type", "Comment", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor = - getDescriptor().getMessageTypes().get(7); - internal_static_org_apache_hadoop_hive_metastore_hbase_Function_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor, - new java.lang.String[] { "ClassName", "OwnerName", "OwnerType", "CreateTime", "FunctionType", "ResourceUris", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor, - new java.lang.String[] { "ResourceType", "Uri", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_descriptor = - getDescriptor().getMessageTypes().get(8); - internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_descriptor, - new java.lang.String[] { "MasterKey", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor = - getDescriptor().getMessageTypes().get(9); - internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor, - new java.lang.String[] { "Key", "Value", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor, - new java.lang.String[] { "Parameter", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor = - getDescriptor().getMessageTypes().get(11); - internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor, - new java.lang.String[] { "CreateTime", "LastAccessTime", "Location", "SdParameters", "SdHash", "Parameters", }); - 
internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor = - getDescriptor().getMessageTypes().get(12); - internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor, - new java.lang.String[] { "PrincipalName", "Privileges", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor = - getDescriptor().getMessageTypes().get(13); - internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor, - new java.lang.String[] { "Users", "Roles", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor = - getDescriptor().getMessageTypes().get(14); - internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor, - new java.lang.String[] { "Privilege", "CreateTime", "Grantor", "GrantorType", "GrantOption", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor = - getDescriptor().getMessageTypes().get(15); - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor, - new java.lang.String[] { "PrincipalName", "PrincipalType", "AddTime", "Grantor", "GrantorType", "GrantOption", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor = - getDescriptor().getMessageTypes().get(16); - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor, - new java.lang.String[] { "GrantInfo", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor = - getDescriptor().getMessageTypes().get(17); - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor, - new java.lang.String[] { "Role", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor = - getDescriptor().getMessageTypes().get(18); - internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor, - new java.lang.String[] { "CreateTime", "OwnerName", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor = - getDescriptor().getMessageTypes().get(19); - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor, - new java.lang.String[] { "Cols", "InputFormat", "OutputFormat", "IsCompressed", 
"NumBuckets", "SerdeInfo", "BucketCols", "SortCols", "SkewedInfo", "StoredAsSubDirectories", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor, - new java.lang.String[] { "ColumnName", "Order", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor.getNestedTypes().get(1); - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor, - new java.lang.String[] { "Name", "SerializationLib", "Parameters", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor.getNestedTypes().get(2); - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor, - new java.lang.String[] { "SkewedColNames", "SkewedColValues", "SkewedColValueLocationMaps", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor, - new java.lang.String[] { "SkewedColValue", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor.getNestedTypes().get(1); - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor, - new java.lang.String[] { "Key", "Value", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor = - getDescriptor().getMessageTypes().get(20); - internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor, - new java.lang.String[] { "Owner", "CreateTime", "LastAccessTime", "Retention", "Location", "SdParameters", "SdHash", "PartitionKeys", "Parameters", "ViewOriginalText", "ViewExpandedText", "TableType", "Privileges", "IsTemporary", "IsRewriteEnabled", }); - 
internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor = - getDescriptor().getMessageTypes().get(21); - internal_static_org_apache_hadoop_hive_metastore_hbase_Index_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor, - new java.lang.String[] { "IndexHandlerClass", "DbName", "OrigTableName", "Location", "SdParameters", "CreateTime", "LastAccessTime", "IndexTableName", "SdHash", "Parameters", "DeferredRebuild", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor = - getDescriptor().getMessageTypes().get(22); - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor, - new java.lang.String[] { "Names", "Types", "Op", "Range", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor, - new java.lang.String[] { "Value", "Inclusive", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor.getNestedTypes().get(1); - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor, - new java.lang.String[] { "Key", "Start", "End", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor.getNestedTypes().get(2); - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor, - new java.lang.String[] { "Type", "Key", "Val", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_descriptor = - getDescriptor().getMessageTypes().get(23); - internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_descriptor, - new java.lang.String[] { "PkName", "Cols", "EnableConstraint", "ValidateConstraint", "RelyConstraint", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_PrimaryKeyColumn_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_PrimaryKeyColumn_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_PrimaryKey_PrimaryKeyColumn_descriptor, - new 
java.lang.String[] { "ColumnName", "KeySeq", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_descriptor = - getDescriptor().getMessageTypes().get(24); - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_descriptor, - new java.lang.String[] { "Fks", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_descriptor, - new java.lang.String[] { "FkName", "ReferencedDbName", "ReferencedTableName", "ReferencedPkName", "UpdateRule", "DeleteRule", "Cols", "EnableConstraint", "ValidateConstraint", "RelyConstraint", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_ForeignKeyColumn_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_ForeignKeyColumn_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_ForeignKeys_ForeignKey_ForeignKeyColumn_descriptor, - new java.lang.String[] { "ColumnName", "ReferencedColumnName", "KeySeq", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_descriptor = - getDescriptor().getMessageTypes().get(25); - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_descriptor, - new java.lang.String[] { "Uks", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_descriptor, - new java.lang.String[] { "UkName", "Cols", "EnableConstraint", "ValidateConstraint", "RelyConstraint", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_UniqueConstraintColumn_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_UniqueConstraintColumn_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_UniqueConstraints_UniqueConstraint_UniqueConstraintColumn_descriptor, - new java.lang.String[] { "ColumnName", "KeySeq", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_descriptor = - getDescriptor().getMessageTypes().get(26); - 
internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_descriptor, - new java.lang.String[] { "Nns", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_descriptor, - new java.lang.String[] { "NnName", "Cols", "EnableConstraint", "ValidateConstraint", "RelyConstraint", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_NotNullConstraintColumn_descriptor = - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_descriptor.getNestedTypes().get(0); - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_NotNullConstraintColumn_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_org_apache_hadoop_hive_metastore_hbase_NotNullConstraints_NotNullConstraint_NotNullConstraintColumn_descriptor, - new java.lang.String[] { "ColumnName", }); - return null; - } - }; - com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - }, assigner); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index edfbf3a..48602bd 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -22,11 +22,16 @@ import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; +import java.math.BigDecimal; +import java.math.BigInteger; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; import java.net.URL; import java.net.URLClassLoader; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -37,6 +42,10 @@ import java.util.Map.Entry; import java.util.Properties; import java.util.Set; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.TreeSet; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -46,6 +55,9 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.ListUtils; import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hive.metastore.api.Decimal; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -114,6 +126,7 @@ // configuration parameter documentation // HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES in HiveConf as well. 
public static final char[] specialCharactersInTableNames = new char[] { '/' }; + final static Charset ENCODING = StandardCharsets.UTF_8; public static Table createColumnsetSchema(String name, List columns, List partCols, Configuration conf) throws MetaException { @@ -1988,4 +2001,88 @@ public static MetaException newMetaException(String errorMessage, Exception e) { return colStats; } + /** + * Produce a hash for the storage descriptor + * @param sd storage descriptor to hash + * @param md message descriptor to use to generate the hash + * @return the hash as a byte array + */ + public static byte[] hashStorageDescriptor(StorageDescriptor sd, MessageDigest md) { + // Note all maps and lists have to be absolutely sorted. Otherwise we'll produce different + // results for hashes based on the OS or JVM being used. + md.reset(); + for (FieldSchema fs : sd.getCols()) { + md.update(fs.getName().getBytes(ENCODING)); + md.update(fs.getType().getBytes(ENCODING)); + if (fs.getComment() != null) md.update(fs.getComment().getBytes(ENCODING)); + } + if (sd.getInputFormat() != null) { + md.update(sd.getInputFormat().getBytes(ENCODING)); + } + if (sd.getOutputFormat() != null) { + md.update(sd.getOutputFormat().getBytes(ENCODING)); + } + md.update(sd.isCompressed() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); + md.update(Integer.toString(sd.getNumBuckets()).getBytes(ENCODING)); + if (sd.getSerdeInfo() != null) { + SerDeInfo serde = sd.getSerdeInfo(); + if (serde.getName() != null) { + md.update(serde.getName().getBytes(ENCODING)); + } + if (serde.getSerializationLib() != null) { + md.update(serde.getSerializationLib().getBytes(ENCODING)); + } + if (serde.getParameters() != null) { + SortedMap params = new TreeMap<>(serde.getParameters()); + for (Entry param : params.entrySet()) { + md.update(param.getKey().getBytes(ENCODING)); + md.update(param.getValue().getBytes(ENCODING)); + } + } + } + if (sd.getBucketCols() != null) { + List bucketCols = new ArrayList<>(sd.getBucketCols()); + for (String bucket : bucketCols) md.update(bucket.getBytes(ENCODING)); + } + if (sd.getSortCols() != null) { + SortedSet orders = new TreeSet<>(sd.getSortCols()); + for (Order order : orders) { + md.update(order.getCol().getBytes(ENCODING)); + md.update(Integer.toString(order.getOrder()).getBytes(ENCODING)); + } + } + if (sd.getSkewedInfo() != null) { + SkewedInfo skewed = sd.getSkewedInfo(); + if (skewed.getSkewedColNames() != null) { + SortedSet colnames = new TreeSet<>(skewed.getSkewedColNames()); + for (String colname : colnames) md.update(colname.getBytes(ENCODING)); + } + if (skewed.getSkewedColValues() != null) { + SortedSet sortedOuterList = new TreeSet<>(); + for (List innerList : skewed.getSkewedColValues()) { + SortedSet sortedInnerList = new TreeSet<>(innerList); + sortedOuterList.add(StringUtils.join(sortedInnerList, ".")); + } + for (String colval : sortedOuterList) md.update(colval.getBytes(ENCODING)); + } + if (skewed.getSkewedColValueLocationMaps() != null) { + SortedMap sortedMap = new TreeMap<>(); + for (Entry, String> smap : skewed.getSkewedColValueLocationMaps().entrySet()) { + SortedSet sortedKey = new TreeSet<>(smap.getKey()); + sortedMap.put(StringUtils.join(sortedKey, "."), smap.getValue()); + } + for (Entry e : sortedMap.entrySet()) { + md.update(e.getKey().getBytes(ENCODING)); + md.update(e.getValue().getBytes(ENCODING)); + } + } + md.update(sd.isStoredAsSubDirectories() ? 
"true".getBytes(ENCODING) : "false".getBytes(ENCODING)); + } + + return md.digest(); + } + + public static double decimalToDouble(Decimal decimal) { + return new BigDecimal(new BigInteger(decimal.getUnscaled()), decimal.getScale()).doubleValue(); + } } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java index b6fb4fd..80b17e0 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java @@ -27,6 +27,7 @@ import java.util.Map.Entry; import java.util.TreeMap; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -40,7 +41,6 @@ import org.apache.hadoop.hive.metastore.cache.CachedStore.PartitionWrapper; import org.apache.hadoop.hive.metastore.cache.CachedStore.StorageDescriptorWrapper; import org.apache.hadoop.hive.metastore.cache.CachedStore.TableWrapper; -import org.apache.hadoop.hive.metastore.hbase.HBaseUtils; import org.apache.hive.common.util.HiveStringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -117,7 +117,7 @@ public static synchronized void addTableToCache(String dbName, String tblName, T } TableWrapper wrapper; if (tbl.getSd() != null) { - byte[] sdHash = HBaseUtils.hashStorageDescriptor(tbl.getSd(), md); + byte[] sdHash = MetaStoreUtils.hashStorageDescriptor(tbl.getSd(), md); StorageDescriptor sd = tbl.getSd(); increSd(sd, sdHash); tblCopy.setSd(null); @@ -285,7 +285,7 @@ public static synchronized void addPartitionToCache(String dbName, String tblNam Partition partCopy = part.deepCopy(); PartitionWrapper wrapper; if (part.getSd()!=null) { - byte[] sdHash = HBaseUtils.hashStorageDescriptor(part.getSd(), md); + byte[] sdHash = MetaStoreUtils.hashStorageDescriptor(part.getSd(), md); StorageDescriptor sd = part.getSd(); increSd(sd, sdHash); partCopy.setSd(null); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java new file mode 100644 index 0000000..e6c836b --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.columnstats.aggr; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.MetaException; + +public class BinaryColumnStatsAggregator extends ColumnStatsAggregator { + + @Override + public ColumnStatisticsObj aggregate(String colName, List partNames, + List css) throws MetaException { + ColumnStatisticsObj statsObj = null; + BinaryColumnStatsData aggregateData = null; + String colType = null; + for (ColumnStatistics cs : css) { + if (cs.getStatsObjSize() != 1) { + throw new MetaException( + "The number of columns should be exactly one in aggrStats, but found " + + cs.getStatsObjSize()); + } + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + if (statsObj == null) { + colType = cso.getColType(); + statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso + .getStatsData().getSetField()); + } + BinaryColumnStatsData newData = cso.getStatsData().getBinaryStats(); + if (aggregateData == null) { + aggregateData = newData.deepCopy(); + } else { + aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen())); + aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + } + } + ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData(); + columnStatisticsData.setBinaryStats(aggregateData); + statsObj.setStatsData(columnStatisticsData); + return statsObj; + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java new file mode 100644 index 0000000..a34bc9f --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.columnstats.aggr; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.MetaException; + +public class BooleanColumnStatsAggregator extends ColumnStatsAggregator { + + @Override + public ColumnStatisticsObj aggregate(String colName, List partNames, + List css) throws MetaException { + ColumnStatisticsObj statsObj = null; + BooleanColumnStatsData aggregateData = null; + String colType = null; + for (ColumnStatistics cs : css) { + if (cs.getStatsObjSize() != 1) { + throw new MetaException( + "The number of columns should be exactly one in aggrStats, but found " + + cs.getStatsObjSize()); + } + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + if (statsObj == null) { + colType = cso.getColType(); + statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso + .getStatsData().getSetField()); + } + BooleanColumnStatsData newData = cso.getStatsData().getBooleanStats(); + if (aggregateData == null) { + aggregateData = newData.deepCopy(); + } else { + aggregateData.setNumTrues(aggregateData.getNumTrues() + newData.getNumTrues()); + aggregateData.setNumFalses(aggregateData.getNumFalses() + newData.getNumFalses()); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + } + } + ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData(); + columnStatisticsData.setBooleanStats(aggregateData); + statsObj.setStatsData(columnStatisticsData); + return statsObj; + } + +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java new file mode 100644 index 0000000..a52e5e5 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
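The Binary and Boolean aggregators above implement the simplest merge rules: booleans sum numTrues, numFalses and numNulls across partitions, while binaries keep the maximum maxColLen and avgColLen and sum numNulls. A hedged sketch of the calling convention they share; the column name is illustrative, and partNames/css are assumed to be the per-partition names and single-column ColumnStatistics objects built by the caller:

    // Every ColumnStatistics in css must hold exactly one ColumnStatisticsObj for the column,
    // otherwise aggregate() throws a MetaException.
    ColumnStatsAggregator agg = new BooleanColumnStatsAggregator();
    ColumnStatisticsObj merged = agg.aggregate("is_active", partNames, css);
    long totalNulls = merged.getStatsData().getBooleanStats().getNumNulls();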
+ */ + +package org.apache.hadoop.hive.metastore.columnstats.aggr; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.MetaException; + +public abstract class ColumnStatsAggregator { + public boolean useDensityFunctionForNDVEstimation; + public double ndvTuner; + public abstract ColumnStatisticsObj aggregate(String colName, List partNames, + List css) throws MetaException; +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java new file mode 100644 index 0000000..173e06f --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java @@ -0,0 +1,113 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.columnstats.aggr; + +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.DateColumnStatsData; +import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; + +public class ColumnStatsAggregatorFactory { + + private ColumnStatsAggregatorFactory() { + } + + public static ColumnStatsAggregator getColumnStatsAggregator(_Fields type, + boolean useDensityFunctionForNDVEstimation, double ndvTuner) { + ColumnStatsAggregator agg; + switch (type) { + case BOOLEAN_STATS: + agg = new BooleanColumnStatsAggregator(); + break; + case LONG_STATS: + agg = new LongColumnStatsAggregator(); + break; + case DATE_STATS: + agg = new DateColumnStatsAggregator(); + break; + case DOUBLE_STATS: + agg = new DoubleColumnStatsAggregator(); + break; + case STRING_STATS: + agg = new StringColumnStatsAggregator(); + break; + case BINARY_STATS: + agg = new BinaryColumnStatsAggregator(); + break; + case DECIMAL_STATS: + agg = new DecimalColumnStatsAggregator(); + break; + default: + throw new RuntimeException("Woh, bad. 
Unknown stats type " + type.toString()); + } + agg.useDensityFunctionForNDVEstimation = useDensityFunctionForNDVEstimation; + agg.ndvTuner = ndvTuner; + return agg; + } + + public static ColumnStatisticsObj newColumnStaticsObj(String colName, String colType, _Fields type) { + ColumnStatisticsObj cso = new ColumnStatisticsObj(); + ColumnStatisticsData csd = new ColumnStatisticsData(); + cso.setColName(colName); + cso.setColType(colType); + switch (type) { + case BOOLEAN_STATS: + csd.setBooleanStats(new BooleanColumnStatsData()); + break; + + case LONG_STATS: + csd.setLongStats(new LongColumnStatsData()); + break; + + case DATE_STATS: + csd.setDateStats(new DateColumnStatsData()); + break; + + case DOUBLE_STATS: + csd.setDoubleStats(new DoubleColumnStatsData()); + break; + + case STRING_STATS: + csd.setStringStats(new StringColumnStatsData()); + break; + + case BINARY_STATS: + csd.setBinaryStats(new BinaryColumnStatsData()); + break; + + case DECIMAL_STATS: + csd.setDecimalStats(new DecimalColumnStatsData()); + break; + + default: + throw new RuntimeException("Woh, bad. Unknown stats type!"); + } + + cso.setStatsData(csd); + return cso; + } + +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java new file mode 100644 index 0000000..c5e72eb --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java @@ -0,0 +1,371 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.columnstats.aggr; + +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.StatObjectConverter; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DecimalColumnStatsAggregator extends ColumnStatsAggregator implements + IExtrapolatePartStatus { + + private static final Logger LOG = LoggerFactory.getLogger(DecimalColumnStatsAggregator.class); + + @Override + public ColumnStatisticsObj aggregate(String colName, List partNames, + List css) throws MetaException { + ColumnStatisticsObj statsObj = null; + + // check if all the ColumnStatisticsObjs contain stats and all the ndv are + // bitvectors + boolean doAllPartitionContainStats = partNames.size() == css.size(); + LOG.debug("doAllPartitionContainStats for " + colName + " is " + doAllPartitionContainStats); + NumDistinctValueEstimator ndvEstimator = null; + String colType = null; + for (ColumnStatistics cs : css) { + if (cs.getStatsObjSize() != 1) { + throw new MetaException( + "The number of columns should be exactly one in aggrStats, but found " + + cs.getStatsObjSize()); + } + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + if (statsObj == null) { + colType = cso.getColType(); + statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso + .getStatsData().getSetField()); + } + if (!cso.getStatsData().getDecimalStats().isSetBitVectors() + || cso.getStatsData().getDecimalStats().getBitVectors().length() == 0) { + ndvEstimator = null; + break; + } else { + // check if all of the bit vectors can merge + NumDistinctValueEstimator estimator = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(cso.getStatsData().getDecimalStats().getBitVectors()); + if (ndvEstimator == null) { + ndvEstimator = estimator; + } else { + if (ndvEstimator.canMerge(estimator)) { + continue; + } else { + ndvEstimator = null; + break; + } + } + } + } + if (ndvEstimator != null) { + ndvEstimator = NumDistinctValueEstimatorFactory + .getEmptyNumDistinctValueEstimator(ndvEstimator); + } + LOG.debug("all of the bit vectors can merge for " + colName + " is " + (ndvEstimator != null)); + ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData(); + if (doAllPartitionContainStats || css.size() < 2) { + DecimalColumnStatsData aggregateData = null; + long lowerBound = 0; + long higherBound = 0; + double densityAvgSum = 0.0; + for (ColumnStatistics cs : css) { + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + DecimalColumnStatsData newData = cso.getStatsData().getDecimalStats(); + lowerBound = Math.max(lowerBound, newData.getNumDVs()); + higherBound += newData.getNumDVs(); + densityAvgSum += (MetaStoreUtils.decimalToDouble(newData.getHighValue()) - MetaStoreUtils + .decimalToDouble(newData.getLowValue())) / newData.getNumDVs(); + if (ndvEstimator != null) { + 
ndvEstimator.mergeEstimators(NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(newData.getBitVectors())); + } + if (aggregateData == null) { + aggregateData = newData.deepCopy(); + } else { + if (MetaStoreUtils.decimalToDouble(aggregateData.getLowValue()) < MetaStoreUtils + .decimalToDouble(newData.getLowValue())) { + aggregateData.setLowValue(aggregateData.getLowValue()); + } else { + aggregateData.setLowValue(newData.getLowValue()); + } + if (MetaStoreUtils.decimalToDouble(aggregateData.getHighValue()) > MetaStoreUtils + .decimalToDouble(newData.getHighValue())) { + aggregateData.setHighValue(aggregateData.getHighValue()); + } else { + aggregateData.setHighValue(newData.getHighValue()); + } + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); + } + } + if (ndvEstimator != null) { + // if all the ColumnStatisticsObjs contain bitvectors, we do not need to + // use uniform distribution assumption because we can merge bitvectors + // to get a good estimation. + aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); + } else { + long estimation; + if (useDensityFunctionForNDVEstimation) { + // We have estimation, lowerbound and higherbound. We use estimation + // if it is between lowerbound and higherbound. + double densityAvg = densityAvgSum / partNames.size(); + estimation = (long) ((MetaStoreUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreUtils + .decimalToDouble(aggregateData.getLowValue())) / densityAvg); + if (estimation < lowerBound) { + estimation = lowerBound; + } else if (estimation > higherBound) { + estimation = higherBound; + } + } else { + estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner); + } + aggregateData.setNumDVs(estimation); + } + columnStatisticsData.setDecimalStats(aggregateData); + } else { + // we need extrapolation + LOG.debug("start extrapolation for " + colName); + Map indexMap = new HashMap(); + for (int index = 0; index < partNames.size(); index++) { + indexMap.put(partNames.get(index), index); + } + Map adjustedIndexMap = new HashMap(); + Map adjustedStatsMap = new HashMap(); + // while we scan the css, we also get the densityAvg, lowerbound and + // higerbound when useDensityFunctionForNDVEstimation is true. + double densityAvgSum = 0.0; + if (ndvEstimator == null) { + // if not every partition uses bitvector for ndv, we just fall back to + // the traditional extrapolation methods. + for (ColumnStatistics cs : css) { + String partName = cs.getStatsDesc().getPartName(); + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + DecimalColumnStatsData newData = cso.getStatsData().getDecimalStats(); + if (useDensityFunctionForNDVEstimation) { + densityAvgSum += (MetaStoreUtils.decimalToDouble(newData.getHighValue()) - MetaStoreUtils + .decimalToDouble(newData.getLowValue())) / newData.getNumDVs(); + } + adjustedIndexMap.put(partName, (double) indexMap.get(partName)); + adjustedStatsMap.put(partName, cso.getStatsData()); + } + } else { + // we first merge all the adjacent bitvectors that we could merge and + // derive new partition names and index. 
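// Illustration (partition names and indices assumed, not from this patch): with
// partNames = [p1, p2, p3, p4] and mergeable bitvectors present only for p1, p2 and p4,
// the adjacent p1 and p2 collapse into one pseudo partition named "p1p2" at adjusted index
// (0 + 1) / 2 = 0.5, while p4 stays on its own at index 3.0; the extrapolation below then
// runs over these pseudo partitions instead of the raw ones.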
+ StringBuilder pseudoPartName = new StringBuilder(); + double pseudoIndexSum = 0; + int length = 0; + int curIndex = -1; + DecimalColumnStatsData aggregateData = null; + for (ColumnStatistics cs : css) { + String partName = cs.getStatsDesc().getPartName(); + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + DecimalColumnStatsData newData = cso.getStatsData().getDecimalStats(); + // newData.isSetBitVectors() should be true for sure because we + // already checked it before. + if (indexMap.get(partName) != curIndex) { + // There is bitvector, but it is not adjacent to the previous ones. + if (length > 0) { + // we have to set ndv + adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); + aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); + ColumnStatisticsData csd = new ColumnStatisticsData(); + csd.setDecimalStats(aggregateData); + adjustedStatsMap.put(pseudoPartName.toString(), csd); + if (useDensityFunctionForNDVEstimation) { + densityAvgSum += (MetaStoreUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreUtils + .decimalToDouble(aggregateData.getLowValue())) / aggregateData.getNumDVs(); + } + // reset everything + pseudoPartName = new StringBuilder(); + pseudoIndexSum = 0; + length = 0; + ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator); + } + aggregateData = null; + } + curIndex = indexMap.get(partName); + pseudoPartName.append(partName); + pseudoIndexSum += curIndex; + length++; + curIndex++; + if (aggregateData == null) { + aggregateData = newData.deepCopy(); + } else { + if (MetaStoreUtils.decimalToDouble(aggregateData.getLowValue()) < MetaStoreUtils + .decimalToDouble(newData.getLowValue())) { + aggregateData.setLowValue(aggregateData.getLowValue()); + } else { + aggregateData.setLowValue(newData.getLowValue()); + } + if (MetaStoreUtils.decimalToDouble(aggregateData.getHighValue()) > MetaStoreUtils + .decimalToDouble(newData.getHighValue())) { + aggregateData.setHighValue(aggregateData.getHighValue()); + } else { + aggregateData.setHighValue(newData.getHighValue()); + } + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + } + ndvEstimator.mergeEstimators(NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(newData.getBitVectors())); + } + if (length > 0) { + // we have to set ndv + adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); + aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); + ColumnStatisticsData csd = new ColumnStatisticsData(); + csd.setDecimalStats(aggregateData); + adjustedStatsMap.put(pseudoPartName.toString(), csd); + if (useDensityFunctionForNDVEstimation) { + densityAvgSum += (MetaStoreUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreUtils + .decimalToDouble(aggregateData.getLowValue())) / aggregateData.getNumDVs(); + } + } + } + extrapolate(columnStatisticsData, partNames.size(), css.size(), adjustedIndexMap, + adjustedStatsMap, densityAvgSum / adjustedStatsMap.size()); + } + statsObj.setStatsData(columnStatisticsData); + LOG.debug("Ndv estimatation for " + colName + " is " + + columnStatisticsData.getDecimalStats().getNumDVs()); + return statsObj; + } + + @Override + public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, + int numPartsWithStats, Map adjustedIndexMap, + Map adjustedStatsMap, double densityAvg) { + int rightBorderInd = numParts; + DecimalColumnStatsData extrapolateDecimalData = new DecimalColumnStatsData(); + Map 
extractedAdjustedStatsMap = new HashMap<>(); + for (Map.Entry entry : adjustedStatsMap.entrySet()) { + extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getDecimalStats()); + } + List> list = new LinkedList>( + extractedAdjustedStatsMap.entrySet()); + // get the lowValue + Collections.sort(list, new Comparator>() { + public int compare(Map.Entry o1, + Map.Entry o2) { + return o1.getValue().getLowValue().compareTo(o2.getValue().getLowValue()); + } + }); + double minInd = adjustedIndexMap.get(list.get(0).getKey()); + double maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); + double lowValue = 0; + double min = MetaStoreUtils.decimalToDouble(list.get(0).getValue().getLowValue()); + double max = MetaStoreUtils.decimalToDouble(list.get(list.size() - 1).getValue().getLowValue()); + if (minInd == maxInd) { + lowValue = min; + } else if (minInd < maxInd) { + // left border is the min + lowValue = (max - (max - min) * maxInd / (maxInd - minInd)); + } else { + // right border is the min + lowValue = (max - (max - min) * (rightBorderInd - maxInd) / (minInd - maxInd)); + } + + // get the highValue + Collections.sort(list, new Comparator>() { + public int compare(Map.Entry o1, + Map.Entry o2) { + return o1.getValue().getHighValue().compareTo(o2.getValue().getHighValue()); + } + }); + minInd = adjustedIndexMap.get(list.get(0).getKey()); + maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); + double highValue = 0; + min = MetaStoreUtils.decimalToDouble(list.get(0).getValue().getHighValue()); + max = MetaStoreUtils.decimalToDouble(list.get(list.size() - 1).getValue().getHighValue()); + if (minInd == maxInd) { + highValue = min; + } else if (minInd < maxInd) { + // right border is the max + highValue = (min + (max - min) * (rightBorderInd - minInd) / (maxInd - minInd)); + } else { + // left border is the max + highValue = (min + (max - min) * minInd / (minInd - maxInd)); + } + + // get the #nulls + long numNulls = 0; + for (Map.Entry entry : extractedAdjustedStatsMap.entrySet()) { + numNulls += entry.getValue().getNumNulls(); + } + // we scale up sumNulls based on the number of partitions + numNulls = numNulls * numParts / numPartsWithStats; + + // get the ndv + long ndv = 0; + long ndvMin = 0; + long ndvMax = 0; + Collections.sort(list, new Comparator>() { + public int compare(Map.Entry o1, + Map.Entry o2) { + return o1.getValue().getNumDVs() < o2.getValue().getNumDVs() ? 
-1 : 1; + } + }); + long lowerBound = list.get(list.size() - 1).getValue().getNumDVs(); + long higherBound = 0; + for (Map.Entry entry : list) { + higherBound += entry.getValue().getNumDVs(); + } + if (useDensityFunctionForNDVEstimation && densityAvg != 0.0) { + ndv = (long) ((highValue - lowValue) / densityAvg); + if (ndv < lowerBound) { + ndv = lowerBound; + } else if (ndv > higherBound) { + ndv = higherBound; + } + } else { + minInd = adjustedIndexMap.get(list.get(0).getKey()); + maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); + ndvMin = list.get(0).getValue().getNumDVs(); + ndvMax = list.get(list.size() - 1).getValue().getNumDVs(); + if (minInd == maxInd) { + ndv = ndvMin; + } else if (minInd < maxInd) { + // right border is the max + ndv = (long) (ndvMin + (ndvMax - ndvMin) * (rightBorderInd - minInd) / (maxInd - minInd)); + } else { + // left border is the max + ndv = (long) (ndvMin + (ndvMax - ndvMin) * minInd / (minInd - maxInd)); + } + } + extrapolateDecimalData.setLowValue(StatObjectConverter.createThriftDecimal(String + .valueOf(lowValue))); + extrapolateDecimalData.setHighValue(StatObjectConverter.createThriftDecimal(String + .valueOf(highValue))); + extrapolateDecimalData.setNumNulls(numNulls); + extrapolateDecimalData.setNumDVs(ndv); + extrapolateData.setDecimalStats(extrapolateDecimalData); + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java new file mode 100644 index 0000000..e55c412 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java @@ -0,0 +1,345 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.columnstats.aggr; + +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DoubleColumnStatsAggregator extends ColumnStatsAggregator implements + IExtrapolatePartStatus { + + private static final Logger LOG = LoggerFactory.getLogger(LongColumnStatsAggregator.class); + + @Override + public ColumnStatisticsObj aggregate(String colName, List partNames, + List css) throws MetaException { + ColumnStatisticsObj statsObj = null; + + // check if all the ColumnStatisticsObjs contain stats and all the ndv are + // bitvectors + boolean doAllPartitionContainStats = partNames.size() == css.size(); + LOG.debug("doAllPartitionContainStats for " + colName + " is " + doAllPartitionContainStats); + NumDistinctValueEstimator ndvEstimator = null; + String colType = null; + for (ColumnStatistics cs : css) { + if (cs.getStatsObjSize() != 1) { + throw new MetaException( + "The number of columns should be exactly one in aggrStats, but found " + + cs.getStatsObjSize()); + } + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + if (statsObj == null) { + colType = cso.getColType(); + statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso + .getStatsData().getSetField()); + } + if (!cso.getStatsData().getDoubleStats().isSetBitVectors() + || cso.getStatsData().getDoubleStats().getBitVectors().length() == 0) { + ndvEstimator = null; + break; + } else { + // check if all of the bit vectors can merge + NumDistinctValueEstimator estimator = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(cso.getStatsData().getDoubleStats().getBitVectors()); + if (ndvEstimator == null) { + ndvEstimator = estimator; + } else { + if (ndvEstimator.canMerge(estimator)) { + continue; + } else { + ndvEstimator = null; + break; + } + } + } + } + if (ndvEstimator != null) { + ndvEstimator = NumDistinctValueEstimatorFactory + .getEmptyNumDistinctValueEstimator(ndvEstimator); + } + LOG.debug("all of the bit vectors can merge for " + colName + " is " + (ndvEstimator != null)); + ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData(); + if (doAllPartitionContainStats || css.size() < 2) { + DoubleColumnStatsData aggregateData = null; + long lowerBound = 0; + long higherBound = 0; + double densityAvgSum = 0.0; + for (ColumnStatistics cs : css) { + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + DoubleColumnStatsData newData = cso.getStatsData().getDoubleStats(); + lowerBound = Math.max(lowerBound, newData.getNumDVs()); + higherBound += newData.getNumDVs(); + densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs(); + if (ndvEstimator != null) { + ndvEstimator.mergeEstimators(NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(newData.getBitVectors())); + } + if (aggregateData == null) { + aggregateData = newData.deepCopy(); + } else { + 
aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); + aggregateData + .setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); + } + } + if (ndvEstimator != null) { + // if all the ColumnStatisticsObjs contain bitvectors, we do not need to + // use uniform distribution assumption because we can merge bitvectors + // to get a good estimation. + aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); + } else { + long estimation; + if (useDensityFunctionForNDVEstimation) { + // We have estimation, lowerbound and higherbound. We use estimation + // if it is between lowerbound and higherbound. + double densityAvg = densityAvgSum / partNames.size(); + estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg); + if (estimation < lowerBound) { + estimation = lowerBound; + } else if (estimation > higherBound) { + estimation = higherBound; + } + } else { + estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner); + } + aggregateData.setNumDVs(estimation); + } + columnStatisticsData.setDoubleStats(aggregateData); + } else { + // we need extrapolation + LOG.debug("start extrapolation for " + colName); + Map indexMap = new HashMap(); + for (int index = 0; index < partNames.size(); index++) { + indexMap.put(partNames.get(index), index); + } + Map adjustedIndexMap = new HashMap(); + Map adjustedStatsMap = new HashMap(); + // while we scan the css, we also get the densityAvg, lowerbound and + // higerbound when useDensityFunctionForNDVEstimation is true. + double densityAvgSum = 0.0; + if (ndvEstimator == null) { + // if not every partition uses bitvector for ndv, we just fall back to + // the traditional extrapolation methods. + for (ColumnStatistics cs : css) { + String partName = cs.getStatsDesc().getPartName(); + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + DoubleColumnStatsData newData = cso.getStatsData().getDoubleStats(); + if (useDensityFunctionForNDVEstimation) { + densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs(); + } + adjustedIndexMap.put(partName, (double) indexMap.get(partName)); + adjustedStatsMap.put(partName, cso.getStatsData()); + } + } else { + // we first merge all the adjacent bitvectors that we could merge and + // derive new partition names and index. + StringBuilder pseudoPartName = new StringBuilder(); + double pseudoIndexSum = 0; + int length = 0; + int curIndex = -1; + DoubleColumnStatsData aggregateData = null; + for (ColumnStatistics cs : css) { + String partName = cs.getStatsDesc().getPartName(); + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + DoubleColumnStatsData newData = cso.getStatsData().getDoubleStats(); + // newData.isSetBitVectors() should be true for sure because we + // already checked it before. + if (indexMap.get(partName) != curIndex) { + // There is bitvector, but it is not adjacent to the previous ones. 
+ if (length > 0) { + // we have to set ndv + adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); + aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); + ColumnStatisticsData csd = new ColumnStatisticsData(); + csd.setDoubleStats(aggregateData); + adjustedStatsMap.put(pseudoPartName.toString(), csd); + if (useDensityFunctionForNDVEstimation) { + densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs(); + } + // reset everything + pseudoPartName = new StringBuilder(); + pseudoIndexSum = 0; + length = 0; + ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator); + } + aggregateData = null; + } + curIndex = indexMap.get(partName); + pseudoPartName.append(partName); + pseudoIndexSum += curIndex; + length++; + curIndex++; + if (aggregateData == null) { + aggregateData = newData.deepCopy(); + } else { + aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); + aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), + newData.getHighValue())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + } + ndvEstimator.mergeEstimators(NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(newData.getBitVectors())); + } + if (length > 0) { + // we have to set ndv + adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); + aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); + ColumnStatisticsData csd = new ColumnStatisticsData(); + csd.setDoubleStats(aggregateData); + adjustedStatsMap.put(pseudoPartName.toString(), csd); + if (useDensityFunctionForNDVEstimation) { + densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs(); + } + } + } + extrapolate(columnStatisticsData, partNames.size(), css.size(), adjustedIndexMap, + adjustedStatsMap, densityAvgSum / adjustedStatsMap.size()); + } + LOG.debug("Ndv estimatation for " + colName + " is " + + columnStatisticsData.getDoubleStats().getNumDVs()); + statsObj.setStatsData(columnStatisticsData); + return statsObj; + } + + @Override + public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, + int numPartsWithStats, Map adjustedIndexMap, + Map adjustedStatsMap, double densityAvg) { + int rightBorderInd = numParts; + DoubleColumnStatsData extrapolateDoubleData = new DoubleColumnStatsData(); + Map extractedAdjustedStatsMap = new HashMap<>(); + for (Map.Entry entry : adjustedStatsMap.entrySet()) { + extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getDoubleStats()); + } + List> list = new LinkedList>( + extractedAdjustedStatsMap.entrySet()); + // get the lowValue + Collections.sort(list, new Comparator>() { + public int compare(Map.Entry o1, + Map.Entry o2) { + return o1.getValue().getLowValue() < o2.getValue().getLowValue() ? 
-1 : 1; + } + }); + double minInd = adjustedIndexMap.get(list.get(0).getKey()); + double maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); + double lowValue = 0; + double min = list.get(0).getValue().getLowValue(); + double max = list.get(list.size() - 1).getValue().getLowValue(); + if (minInd == maxInd) { + lowValue = min; + } else if (minInd < maxInd) { + // left border is the min + lowValue = (max - (max - min) * maxInd / (maxInd - minInd)); + } else { + // right border is the min + lowValue = (max - (max - min) * (rightBorderInd - maxInd) / (minInd - maxInd)); + } + + // get the highValue + Collections.sort(list, new Comparator>() { + public int compare(Map.Entry o1, + Map.Entry o2) { + return o1.getValue().getHighValue() < o2.getValue().getHighValue() ? -1 : 1; + } + }); + minInd = adjustedIndexMap.get(list.get(0).getKey()); + maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); + double highValue = 0; + min = list.get(0).getValue().getHighValue(); + max = list.get(list.size() - 1).getValue().getHighValue(); + if (minInd == maxInd) { + highValue = min; + } else if (minInd < maxInd) { + // right border is the max + highValue = (min + (max - min) * (rightBorderInd - minInd) / (maxInd - minInd)); + } else { + // left border is the max + highValue = (min + (max - min) * minInd / (minInd - maxInd)); + } + + // get the #nulls + long numNulls = 0; + for (Map.Entry entry : extractedAdjustedStatsMap.entrySet()) { + numNulls += entry.getValue().getNumNulls(); + } + // we scale up sumNulls based on the number of partitions + numNulls = numNulls * numParts / numPartsWithStats; + + // get the ndv + long ndv = 0; + long ndvMin = 0; + long ndvMax = 0; + Collections.sort(list, new Comparator>() { + public int compare(Map.Entry o1, + Map.Entry o2) { + return o1.getValue().getNumDVs() < o2.getValue().getNumDVs() ? 
-1 : 1; + } + }); + long lowerBound = list.get(list.size() - 1).getValue().getNumDVs(); + long higherBound = 0; + for (Map.Entry entry : list) { + higherBound += entry.getValue().getNumDVs(); + } + if (useDensityFunctionForNDVEstimation && densityAvg != 0.0) { + ndv = (long) ((highValue - lowValue) / densityAvg); + if (ndv < lowerBound) { + ndv = lowerBound; + } else if (ndv > higherBound) { + ndv = higherBound; + } + } else { + minInd = adjustedIndexMap.get(list.get(0).getKey()); + maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); + ndvMin = list.get(0).getValue().getNumDVs(); + ndvMax = list.get(list.size() - 1).getValue().getNumDVs(); + if (minInd == maxInd) { + ndv = ndvMin; + } else if (minInd < maxInd) { + // right border is the max + ndv = (long) (ndvMin + (ndvMax - ndvMin) * (rightBorderInd - minInd) / (maxInd - minInd)); + } else { + // left border is the max + ndv = (long) (ndvMin + (ndvMax - ndvMin) * minInd / (minInd - maxInd)); + } + } + extrapolateDoubleData.setLowValue(lowValue); + extrapolateDoubleData.setHighValue(highValue); + extrapolateDoubleData.setNumNulls(numNulls); + extrapolateDoubleData.setNumDVs(ndv); + extrapolateData.setDoubleStats(extrapolateDoubleData); + } + +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java new file mode 100644 index 0000000..acf679e --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.columnstats.aggr; + +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; + +public interface IExtrapolatePartStatus { + // The following function will extrapolate the stats when the column stats of + // some partitions are missing. + /** + * @param extrapolateData + * it will carry back the specific stats, e.g., DOUBLE_STATS or + * LONG_STATS + * @param numParts + * the total number of partitions + * @param numPartsWithStats + * the number of partitions that have stats + * @param adjustedIndexMap + * the partition name to index map + * @param adjustedStatsMap + * the partition name to its stats map + * @param densityAvg + * the average of ndv density, which is useful when + * useDensityFunctionForNDVEstimation is true. 
+ */ + public abstract void extrapolate(ColumnStatisticsData extrapolateData, int numParts, + int numPartsWithStats, Map adjustedIndexMap, + Map adjustedStatsMap, double densityAvg); + +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java new file mode 100644 index 0000000..2ee09f3 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java @@ -0,0 +1,344 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.columnstats.aggr; + +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class LongColumnStatsAggregator extends ColumnStatsAggregator implements + IExtrapolatePartStatus { + + private static final Logger LOG = LoggerFactory.getLogger(LongColumnStatsAggregator.class); + + @Override + public ColumnStatisticsObj aggregate(String colName, List partNames, + List css) throws MetaException { + ColumnStatisticsObj statsObj = null; + + // check if all the ColumnStatisticsObjs contain stats and all the ndv are + // bitvectors + boolean doAllPartitionContainStats = partNames.size() == css.size(); + LOG.debug("doAllPartitionContainStats for " + colName + " is " + doAllPartitionContainStats); + NumDistinctValueEstimator ndvEstimator = null; + String colType = null; + for (ColumnStatistics cs : css) { + if (cs.getStatsObjSize() != 1) { + throw new MetaException( + "The number of columns should be exactly one in aggrStats, but found " + + cs.getStatsObjSize()); + } + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + if (statsObj == null) { + colType = cso.getColType(); + statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso + .getStatsData().getSetField()); + } + if (!cso.getStatsData().getLongStats().isSetBitVectors() + || cso.getStatsData().getLongStats().getBitVectors().length() == 0) { + ndvEstimator = null; + break; + } else { + // check if all of the bit vectors can merge + NumDistinctValueEstimator 
estimator = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(cso.getStatsData().getLongStats().getBitVectors()); + if (ndvEstimator == null) { + ndvEstimator = estimator; + } else { + if (ndvEstimator.canMerge(estimator)) { + continue; + } else { + ndvEstimator = null; + break; + } + } + } + } + if (ndvEstimator != null) { + ndvEstimator = NumDistinctValueEstimatorFactory + .getEmptyNumDistinctValueEstimator(ndvEstimator); + } + LOG.debug("all of the bit vectors can merge for " + colName + " is " + (ndvEstimator != null)); + ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData(); + if (doAllPartitionContainStats || css.size() < 2) { + LongColumnStatsData aggregateData = null; + long lowerBound = 0; + long higherBound = 0; + double densityAvgSum = 0.0; + for (ColumnStatistics cs : css) { + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + LongColumnStatsData newData = cso.getStatsData().getLongStats(); + lowerBound = Math.max(lowerBound, newData.getNumDVs()); + higherBound += newData.getNumDVs(); + densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs(); + if (ndvEstimator != null) { + ndvEstimator.mergeEstimators(NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(newData.getBitVectors())); + } + if (aggregateData == null) { + aggregateData = newData.deepCopy(); + } else { + aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); + aggregateData + .setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); + } + } + if (ndvEstimator != null) { + // if all the ColumnStatisticsObjs contain bitvectors, we do not need to + // use uniform distribution assumption because we can merge bitvectors + // to get a good estimation. + aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); + } else { + long estimation; + if (useDensityFunctionForNDVEstimation) { + // We have estimation, lowerbound and higherbound. We use estimation + // if it is between lowerbound and higherbound. + double densityAvg = densityAvgSum / partNames.size(); + estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg); + if (estimation < lowerBound) { + estimation = lowerBound; + } else if (estimation > higherBound) { + estimation = higherBound; + } + } else { + estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner); + } + aggregateData.setNumDVs(estimation); + } + columnStatisticsData.setLongStats(aggregateData); + } else { + // we need extrapolation + LOG.debug("start extrapolation for " + colName); + + Map indexMap = new HashMap(); + for (int index = 0; index < partNames.size(); index++) { + indexMap.put(partNames.get(index), index); + } + Map adjustedIndexMap = new HashMap(); + Map adjustedStatsMap = new HashMap(); + // while we scan the css, we also get the densityAvg, lowerbound and + // higerbound when useDensityFunctionForNDVEstimation is true. + double densityAvgSum = 0.0; + if (ndvEstimator == null) { + // if not every partition uses bitvector for ndv, we just fall back to + // the traditional extrapolation methods. 
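// Worked sketch of the extrapolation inputs (hypothetical partition names, not part of
// this patch): for partNames {p0, p1, p2, p3} with stats present only for p1 and p3, the
// fallback path keeps one entry per partition that has stats, keyed by its position in
// partNames; extrapolate() later uses those positions as x-coordinates.
List<String> partNames = java.util.Arrays.asList("p0", "p1", "p2", "p3");
Map<String, Integer> indexMap = new HashMap<>();
for (int i = 0; i < partNames.size(); i++) {
  indexMap.put(partNames.get(i), i);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
adjustedIndexMap.put("p1", (double) indexMap.get("p1")); // 1.0
adjustedIndexMap.put("p3", (double) indexMap.get("p3")); // 3.0
// adjustedStatsMap is keyed the same way and holds each partition's ColumnStatisticsData;
// in the bitvector path, adjacent partitions are first collapsed into pseudo-partitions
// whose index is the average of the indices they cover.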
+ for (ColumnStatistics cs : css) { + String partName = cs.getStatsDesc().getPartName(); + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + LongColumnStatsData newData = cso.getStatsData().getLongStats(); + if (useDensityFunctionForNDVEstimation) { + densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs(); + } + adjustedIndexMap.put(partName, (double) indexMap.get(partName)); + adjustedStatsMap.put(partName, cso.getStatsData()); + } + } else { + // we first merge all the adjacent bitvectors that we could merge and + // derive new partition names and index. + StringBuilder pseudoPartName = new StringBuilder(); + double pseudoIndexSum = 0; + int length = 0; + int curIndex = -1; + LongColumnStatsData aggregateData = null; + for (ColumnStatistics cs : css) { + String partName = cs.getStatsDesc().getPartName(); + ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); + LongColumnStatsData newData = cso.getStatsData().getLongStats(); + // newData.isSetBitVectors() should be true for sure because we + // already checked it before. + if (indexMap.get(partName) != curIndex) { + // There is bitvector, but it is not adjacent to the previous ones. + if (length > 0) { + // we have to set ndv + adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); + aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); + ColumnStatisticsData csd = new ColumnStatisticsData(); + csd.setLongStats(aggregateData); + adjustedStatsMap.put(pseudoPartName.toString(), csd); + if (useDensityFunctionForNDVEstimation) { + densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs(); + } + // reset everything + pseudoPartName = new StringBuilder(); + pseudoIndexSum = 0; + length = 0; + ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator); + } + aggregateData = null; + } + curIndex = indexMap.get(partName); + pseudoPartName.append(partName); + pseudoIndexSum += curIndex; + length++; + curIndex++; + if (aggregateData == null) { + aggregateData = newData.deepCopy(); + } else { + aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); + aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), + newData.getHighValue())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + } + ndvEstimator.mergeEstimators(NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(newData.getBitVectors())); + } + if (length > 0) { + // we have to set ndv + adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); + aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); + ColumnStatisticsData csd = new ColumnStatisticsData(); + csd.setLongStats(aggregateData); + adjustedStatsMap.put(pseudoPartName.toString(), csd); + if (useDensityFunctionForNDVEstimation) { + densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs(); + } + } + } + extrapolate(columnStatisticsData, partNames.size(), css.size(), adjustedIndexMap, + adjustedStatsMap, densityAvgSum / adjustedStatsMap.size()); + } + statsObj.setStatsData(columnStatisticsData); + LOG.debug("Ndv estimatation for " + colName + " is " + + columnStatisticsData.getLongStats().getNumDVs()); + return statsObj; + } + + @Override + public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, + int numPartsWithStats, Map adjustedIndexMap, + Map adjustedStatsMap, double densityAvg) { + int 
rightBorderInd = numParts; + LongColumnStatsData extrapolateLongData = new LongColumnStatsData(); + Map extractedAdjustedStatsMap = new HashMap<>(); + for (Map.Entry entry : adjustedStatsMap.entrySet()) { + extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getLongStats()); + } + List> list = new LinkedList>( + extractedAdjustedStatsMap.entrySet()); + // get the lowValue + Collections.sort(list, new Comparator>() { + public int compare(Map.Entry o1, + Map.Entry o2) { + return o1.getValue().getLowValue() < o2.getValue().getLowValue() ? -1 : 1; + } + }); + double minInd = adjustedIndexMap.get(list.get(0).getKey()); + double maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); + long lowValue = 0; + long min = list.get(0).getValue().getLowValue(); + long max = list.get(list.size() - 1).getValue().getLowValue(); + if (minInd == maxInd) { + lowValue = min; + } else if (minInd < maxInd) { + // left border is the min + lowValue = (long) (max - (max - min) * maxInd / (maxInd - minInd)); + } else { + // right border is the min + lowValue = (long) (max - (max - min) * (rightBorderInd - maxInd) / (minInd - maxInd)); + } + + // get the highValue + Collections.sort(list, new Comparator>() { + public int compare(Map.Entry o1, + Map.Entry o2) { + return o1.getValue().getHighValue() < o2.getValue().getHighValue() ? -1 : 1; + } + }); + minInd = adjustedIndexMap.get(list.get(0).getKey()); + maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); + long highValue = 0; + min = list.get(0).getValue().getHighValue(); + max = list.get(list.size() - 1).getValue().getHighValue(); + if (minInd == maxInd) { + highValue = min; + } else if (minInd < maxInd) { + // right border is the max + highValue = (long) (min + (max - min) * (rightBorderInd - minInd) / (maxInd - minInd)); + } else { + // left border is the max + highValue = (long) (min + (max - min) * minInd / (minInd - maxInd)); + } + + // get the #nulls + long numNulls = 0; + for (Map.Entry entry : extractedAdjustedStatsMap.entrySet()) { + numNulls += entry.getValue().getNumNulls(); + } + // we scale up sumNulls based on the number of partitions + numNulls = numNulls * numParts / numPartsWithStats; + + // get the ndv + long ndv = 0; + Collections.sort(list, new Comparator>() { + public int compare(Map.Entry o1, + Map.Entry o2) { + return o1.getValue().getNumDVs() < o2.getValue().getNumDVs() ? 
-1 : 1; + } + }); + long lowerBound = list.get(list.size() - 1).getValue().getNumDVs(); + long higherBound = 0; + for (Map.Entry entry : list) { + higherBound += entry.getValue().getNumDVs(); + } + if (useDensityFunctionForNDVEstimation && densityAvg != 0.0) { + ndv = (long) ((highValue - lowValue) / densityAvg); + if (ndv < lowerBound) { + ndv = lowerBound; + } else if (ndv > higherBound) { + ndv = higherBound; + } + } else { + minInd = adjustedIndexMap.get(list.get(0).getKey()); + maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); + min = list.get(0).getValue().getNumDVs(); + max = list.get(list.size() - 1).getValue().getNumDVs(); + if (minInd == maxInd) { + ndv = min; + } else if (minInd < maxInd) { + // right border is the max + ndv = (long) (min + (max - min) * (rightBorderInd - minInd) / (maxInd - minInd)); + } else { + // left border is the max + ndv = (long) (min + (max - min) * minInd / (minInd - maxInd)); + } + } + extrapolateLongData.setLowValue(lowValue); + extrapolateLongData.setHighValue(highValue); + extrapolateLongData.setNumNulls(numNulls); + extrapolateLongData.setNumDVs(ndv); + extrapolateData.setLongStats(extrapolateLongData); + } + +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java new file mode 100644 index 0000000..4c2d1bc --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
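// Worked example of the linear extrapolation in extrapolate() above (hypothetical
// numbers, not part of this patch): suppose numParts = 4 and only the partitions at
// indices 1 and 3 have stats, with lowValue 10 and 30 and highValue 100 and 300. The
// indices act as x-coordinates and the line is extended to the table borders
// (rightBorderInd = numParts = 4):
double lowValue = 30 - (30 - 10) * 3.0 / (3 - 1);            // = 0, extended to index 0
double highValue = 100 + (300 - 100) * (4 - 1.0) / (3 - 1);  // = 400, extended to index 4
// numNulls is scaled rather than extrapolated: the sum over partitions with stats is
// multiplied by numParts / numPartsWithStats. The NDV uses either the same linear
// projection or, when the density function is enabled, (highValue - lowValue) / densityAvg
// clamped to [largest per-partition NDV, sum of per-partition NDVs].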
+ */ + +package org.apache.hadoop.hive.metastore.columnstats.merge; + +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; + +public class BinaryColumnStatsMerger extends ColumnStatsMerger { + + @Override + public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + BinaryColumnStatsData aggregateData = aggregateColStats.getStatsData().getBinaryStats(); + BinaryColumnStatsData newData = newColStats.getStatsData().getBinaryStats(); + aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen())); + aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java new file mode 100644 index 0000000..8e50153 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.columnstats.merge; + +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; + +public class BooleanColumnStatsMerger extends ColumnStatsMerger { + + @Override + public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + BooleanColumnStatsData aggregateData = aggregateColStats.getStatsData().getBooleanStats(); + BooleanColumnStatsData newData = newColStats.getStatsData().getBooleanStats(); + aggregateData.setNumTrues(aggregateData.getNumTrues() + newData.getNumTrues()); + aggregateData.setNumFalses(aggregateData.getNumFalses() + newData.getNumFalses()); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java new file mode 100644 index 0000000..474d4dd --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
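// Minimal usage sketch for the mergers above (hypothetical values, not part of this
// patch): each merger folds the stats of a new ColumnStatisticsObj into the running
// aggregate in place, so callers keep a single object per column.
BooleanColumnStatsData aggBool = new BooleanColumnStatsData();
aggBool.setNumTrues(10);
aggBool.setNumFalses(5);
aggBool.setNumNulls(1);
ColumnStatisticsData aggData = new ColumnStatisticsData();
aggData.setBooleanStats(aggBool);
ColumnStatisticsObj aggregate = new ColumnStatisticsObj();
aggregate.setColName("flag");
aggregate.setColType("boolean");
aggregate.setStatsData(aggData);

BooleanColumnStatsData newBool = new BooleanColumnStatsData();
newBool.setNumTrues(2);
newBool.setNumFalses(3);
newBool.setNumNulls(0);
ColumnStatisticsData newStatsData = new ColumnStatisticsData();
newStatsData.setBooleanStats(newBool);
ColumnStatisticsObj incoming = new ColumnStatisticsObj();
incoming.setColName("flag");
incoming.setColType("boolean");
incoming.setStatsData(newStatsData);

new BooleanColumnStatsMerger().merge(aggregate, incoming);
// aggregate now reports numTrues = 12, numFalses = 8, numNulls = 1.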
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.columnstats.merge; + +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public abstract class ColumnStatsMerger { + protected final Logger LOG = LoggerFactory.getLogger(ColumnStatsMerger.class.getName()); + + public abstract void merge(ColumnStatisticsObj aggregateColStats, + ColumnStatisticsObj newColStats); +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java new file mode 100644 index 0000000..0ce1847 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java @@ -0,0 +1,123 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.columnstats.merge; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; +import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.DateColumnStatsData; +import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; + +public class ColumnStatsMergerFactory { + + private ColumnStatsMergerFactory() { + } + + public static ColumnStatsMerger getColumnStatsMerger(ColumnStatisticsObj statsObjNew, + ColumnStatisticsObj statsObjOld) { + ColumnStatsMerger agg; + _Fields typeNew = statsObjNew.getStatsData().getSetField(); + _Fields typeOld = statsObjOld.getStatsData().getSetField(); + // make sure that they have the same type + typeNew = typeNew == typeOld ? typeNew : null; + switch (typeNew) { + case BOOLEAN_STATS: + agg = new BooleanColumnStatsMerger(); + break; + case LONG_STATS: { + agg = new LongColumnStatsMerger(); + break; + } + case DOUBLE_STATS: { + agg = new DoubleColumnStatsMerger(); + break; + } + case STRING_STATS: { + agg = new StringColumnStatsMerger(); + break; + } + case BINARY_STATS: + agg = new BinaryColumnStatsMerger(); + break; + case DECIMAL_STATS: { + agg = new DecimalColumnStatsMerger(); + break; + } + case DATE_STATS: { + agg = new DateColumnStatsMerger(); + break; + } + default: + throw new IllegalArgumentException("Unknown stats type " + typeNew.toString()); + } + return agg; + } + + public static ColumnStatisticsObj newColumnStaticsObj(String colName, String colType, _Fields type) { + ColumnStatisticsObj cso = new ColumnStatisticsObj(); + ColumnStatisticsData csd = new ColumnStatisticsData(); + cso.setColName(colName); + cso.setColType(colType); + switch (type) { + case BOOLEAN_STATS: + csd.setBooleanStats(new BooleanColumnStatsData()); + break; + + case LONG_STATS: + csd.setLongStats(new LongColumnStatsData()); + break; + + case DOUBLE_STATS: + csd.setDoubleStats(new DoubleColumnStatsData()); + break; + + case STRING_STATS: + csd.setStringStats(new StringColumnStatsData()); + break; + + case BINARY_STATS: + csd.setBinaryStats(new BinaryColumnStatsData()); + break; + + case DECIMAL_STATS: + csd.setDecimalStats(new DecimalColumnStatsData()); + break; + + case DATE_STATS: + csd.setDateStats(new DateColumnStatsData()); + break; + + default: + throw new IllegalArgumentException("Unknown stats type"); + } + + cso.setStatsData(csd); + return cso; + } + +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java new file mode 100644 index 0000000..2542a00 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
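// Usage sketch for the factory above (statsObjNew and statsObjOld are hypothetical
// ColumnStatisticsObj variables, not part of this patch): callers ask the factory for a
// merger matching the stats type, so the dispatch on ColumnStatisticsData._Fields lives
// in one place. Both objects must carry the same stats type for the lookup to succeed.
ColumnStatsMerger merger = ColumnStatsMergerFactory.getColumnStatsMerger(statsObjNew, statsObjOld);
merger.merge(statsObjOld, statsObjNew);  // first argument is the aggregate, updated in place
// newColumnStaticsObj() builds an empty, correctly typed object to aggregate into, e.g.:
ColumnStatisticsObj emptyDoubleStats = ColumnStatsMergerFactory.newColumnStaticsObj(
    "price", "double", ColumnStatisticsData._Fields.DOUBLE_STATS);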
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.columnstats.merge; + +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Date; +import org.apache.hadoop.hive.metastore.api.DateColumnStatsData; + +public class DateColumnStatsMerger extends ColumnStatsMerger { + @Override + public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + DateColumnStatsData aggregateData = aggregateColStats.getStatsData().getDateStats(); + DateColumnStatsData newData = newColStats.getStatsData().getDateStats(); + Date lowValue = aggregateData.getLowValue().compareTo(newData.getLowValue()) < 0 ? aggregateData + .getLowValue() : newData.getLowValue(); + aggregateData.setLowValue(lowValue); + Date highValue = aggregateData.getHighValue().compareTo(newData.getHighValue()) >= 0 ? aggregateData + .getHighValue() : newData.getHighValue(); + aggregateData.setHighValue(highValue); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + if (!aggregateData.isSetBitVectors() || aggregateData.getBitVectors().length() == 0 + || !newData.isSetBitVectors() || newData.getBitVectors().length() == 0) { + aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); + } else { + NumDistinctValueEstimator oldEst = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(aggregateData.getBitVectors()); + NumDistinctValueEstimator newEst = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(newData.getBitVectors()); + long ndv = -1; + if (oldEst.canMerge(newEst)) { + oldEst.mergeEstimators(newEst); + ndv = oldEst.estimateNumDistinctValues(); + aggregateData.setBitVectors(oldEst.serialize()); + } else { + ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs()); + } + LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of " + + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv); + aggregateData.setNumDVs(ndv); + } + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java new file mode 100644 index 0000000..4e8e129 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.columnstats.merge; + +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Decimal; +import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; + +public class DecimalColumnStatsMerger extends ColumnStatsMerger { + @Override + public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + DecimalColumnStatsData aggregateData = aggregateColStats.getStatsData().getDecimalStats(); + DecimalColumnStatsData newData = newColStats.getStatsData().getDecimalStats(); + Decimal lowValue = aggregateData.getLowValue() != null + && (aggregateData.getLowValue().compareTo(newData.getLowValue()) > 0) ? aggregateData + .getLowValue() : newData.getLowValue(); + aggregateData.setLowValue(lowValue); + Decimal highValue = aggregateData.getHighValue() != null + && (aggregateData.getHighValue().compareTo(newData.getHighValue()) > 0) ? aggregateData + .getHighValue() : newData.getHighValue(); + aggregateData.setHighValue(highValue); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + if (!aggregateData.isSetBitVectors() || aggregateData.getBitVectors().length() == 0 + || !newData.isSetBitVectors() || newData.getBitVectors().length() == 0) { + aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); + } else { + NumDistinctValueEstimator oldEst = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(aggregateData.getBitVectors()); + NumDistinctValueEstimator newEst = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(newData.getBitVectors()); + long ndv = -1; + if (oldEst.canMerge(newEst)) { + oldEst.mergeEstimators(newEst); + ndv = oldEst.estimateNumDistinctValues(); + aggregateData.setBitVectors(oldEst.serialize()); + } else { + ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs()); + } + LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of " + + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv); + aggregateData.setNumDVs(ndv); + } + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java new file mode 100644 index 0000000..4ef5c39 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.columnstats.merge; + +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; + +public class DoubleColumnStatsMerger extends ColumnStatsMerger { + @Override + public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + DoubleColumnStatsData aggregateData = aggregateColStats.getStatsData().getDoubleStats(); + DoubleColumnStatsData newData = newColStats.getStatsData().getDoubleStats(); + aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); + aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + if (!aggregateData.isSetBitVectors() || aggregateData.getBitVectors().length() == 0 + || !newData.isSetBitVectors() || newData.getBitVectors().length() == 0) { + aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); + } else { + NumDistinctValueEstimator oldEst = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(aggregateData.getBitVectors()); + NumDistinctValueEstimator newEst = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(newData.getBitVectors()); + long ndv = -1; + if (oldEst.canMerge(newEst)) { + oldEst.mergeEstimators(newEst); + ndv = oldEst.estimateNumDistinctValues(); + aggregateData.setBitVectors(oldEst.serialize()); + } else { + ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs()); + } + LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of " + + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv); + aggregateData.setNumDVs(ndv); + } + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java new file mode 100644 index 0000000..acf7f03 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
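// Note on the NDV handling shared by the Date/Decimal/Double/Long/String mergers above
// (sketch restating the pattern, not part of this patch): max(ndv1, ndv2) is only a lower
// bound on the combined NDV, and ndv1 + ndv2 double-counts values present in both
// partitions; merging the serialized estimators (e.g. a HyperLogLog union) gives a direct
// estimate in between. With aggregateData and newData as in the mergers above:
NumDistinctValueEstimator oldEst =
    NumDistinctValueEstimatorFactory.getNumDistinctValueEstimator(aggregateData.getBitVectors());
NumDistinctValueEstimator newEst =
    NumDistinctValueEstimatorFactory.getNumDistinctValueEstimator(newData.getBitVectors());
if (oldEst.canMerge(newEst)) {
  oldEst.mergeEstimators(newEst);                                  // union of the sketches
  aggregateData.setNumDVs(oldEst.estimateNumDistinctValues());
  aggregateData.setBitVectors(oldEst.serialize());                 // keep the merged sketch
} else {
  aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs()));
}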
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.columnstats.merge; + +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; + +public class LongColumnStatsMerger extends ColumnStatsMerger { + @Override + public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + LongColumnStatsData aggregateData = aggregateColStats.getStatsData().getLongStats(); + LongColumnStatsData newData = newColStats.getStatsData().getLongStats(); + aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); + aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + if (!aggregateData.isSetBitVectors() || aggregateData.getBitVectors().length() == 0 + || !newData.isSetBitVectors() || newData.getBitVectors().length() == 0) { + aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); + } else { + NumDistinctValueEstimator oldEst = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(aggregateData.getBitVectors()); + NumDistinctValueEstimator newEst = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(newData.getBitVectors()); + long ndv = -1; + if (oldEst.canMerge(newEst)) { + oldEst.mergeEstimators(newEst); + ndv = oldEst.estimateNumDistinctValues(); + aggregateData.setBitVectors(oldEst.serialize()); + } else { + ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs()); + } + LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of " + + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv); + aggregateData.setNumDVs(ndv); + } + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java new file mode 100644 index 0000000..b3cd33c --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.columnstats.merge; + +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; +import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; + +public class StringColumnStatsMerger extends ColumnStatsMerger { + @Override + public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + StringColumnStatsData aggregateData = aggregateColStats.getStatsData().getStringStats(); + StringColumnStatsData newData = newColStats.getStatsData().getStringStats(); + aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen())); + aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + if (!aggregateData.isSetBitVectors() || aggregateData.getBitVectors().length() == 0 + || !newData.isSetBitVectors() || newData.getBitVectors().length() == 0) { + aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); + } else { + NumDistinctValueEstimator oldEst = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(aggregateData.getBitVectors()); + NumDistinctValueEstimator newEst = NumDistinctValueEstimatorFactory + .getNumDistinctValueEstimator(newData.getBitVectors()); + long ndv = -1; + if (oldEst.canMerge(newEst)) { + oldEst.mergeEstimators(newEst); + ndv = oldEst.estimateNumDistinctValues(); + aggregateData.setBitVectors(oldEst.serialize()); + } else { + ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs()); + } + LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of " + + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv); + aggregateData.setNumDVs(ndv); + } + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/AggrStatsInvalidatorFilter.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/AggrStatsInvalidatorFilter.java deleted file mode 100644 index 4ca4229..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/AggrStatsInvalidatorFilter.java +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import com.google.protobuf.InvalidProtocolBufferException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.filter.FilterBase; -import org.apache.hive.common.util.BloomFilter; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -/** - * Filter for scanning aggregates stats table - */ -public class AggrStatsInvalidatorFilter extends FilterBase { - private static final Logger LOG = - LoggerFactory.getLogger(AggrStatsInvalidatorFilter.class.getName()); - private final List entries; - private final long runEvery; - private final long maxCacheEntryLife; - // This class is not serializable, so I realize transient doesn't mean anything. It's just to - // comunicate that we don't serialize this and ship it across to the filter on the other end. - // We use the time the filter is actually instantiated in HBase. - private transient long now; - - public static Filter parseFrom(byte[] serialized) throws DeserializationException { - try { - return new AggrStatsInvalidatorFilter( - HbaseMetastoreProto.AggrStatsInvalidatorFilter.parseFrom(serialized)); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - } - - /** - * @param proto Protocol buffer representation of this filter. - */ - AggrStatsInvalidatorFilter(HbaseMetastoreProto.AggrStatsInvalidatorFilter proto) { - this.entries = proto.getToInvalidateList(); - this.runEvery = proto.getRunEvery(); - this.maxCacheEntryLife = proto.getMaxCacheEntryLife(); - now = System.currentTimeMillis(); - } - - @Override - public byte[] toByteArray() throws IOException { - return HbaseMetastoreProto.AggrStatsInvalidatorFilter.newBuilder() - .addAllToInvalidate(entries) - .setRunEvery(runEvery) - .setMaxCacheEntryLife(maxCacheEntryLife) - .build() - .toByteArray(); - } - - @Override - public boolean filterAllRemaining() throws IOException { - return false; - } - - @Override - public ReturnCode filterKeyValue(Cell cell) throws IOException { - // Is this the partition we want? - if (Arrays.equals(CellUtil.cloneQualifier(cell), HBaseReadWrite.AGGR_STATS_BLOOM_COL)) { - HbaseMetastoreProto.AggrStatsBloomFilter fromCol = - HbaseMetastoreProto.AggrStatsBloomFilter.parseFrom(CellUtil.cloneValue(cell)); - BloomFilter bloom = null; - if (now - maxCacheEntryLife > fromCol.getAggregatedAt()) { - // It's too old, kill it regardless of whether we were asked to or not. - return ReturnCode.INCLUDE; - } else if (now - runEvery * 2 <= fromCol.getAggregatedAt()) { - // It's too new. We might be stomping on something that was just created. Skip it. - return ReturnCode.NEXT_ROW; - } else { - // Look through each of our entries and see if any of them match. 
- for (HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry entry : entries) { - // First check if we match on db and table match - if (entry.getDbName().equals(fromCol.getDbName()) && - entry.getTableName().equals(fromCol.getTableName())) { - if (bloom == null) { - // Now, reconstitute the bloom filter and probe it with each of our partition names - bloom = new BloomFilter( - fromCol.getBloomFilter().getBitsList(), - fromCol.getBloomFilter().getNumBits(), - fromCol.getBloomFilter().getNumFuncs()); - } - if (bloom.test(entry.getPartName().toByteArray())) { - // This is most likely a match, so mark it and quit looking. - return ReturnCode.INCLUDE; - } - } - } - } - return ReturnCode.NEXT_ROW; - } else { - return ReturnCode.NEXT_COL; - } - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/Counter.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/Counter.java deleted file mode 100644 index 2359939..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/Counter.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import com.google.common.annotations.VisibleForTesting; - -/** - * A simple metric to count how many times something occurs. - */ -class Counter { - private final String name; - private long cnt; - - Counter(String name) { - this.name = name; - cnt = 0; - } - - void incr() { - cnt++; - } - - void clear() { - cnt = 0; - } - - String dump() { - StringBuilder bldr = new StringBuilder("Dumping metric: "); - bldr.append(name).append(' ').append(cnt); - return bldr.toString(); - } - - @VisibleForTesting long getCnt() { - return cnt; - } - -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseConnection.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseConnection.java deleted file mode 100644 index 696e588..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseConnection.java +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.hbase.client.HTableInterface; - -import java.io.IOException; -import java.util.List; - -/** - * A connection to HBase. Separated out as an interface so we can slide different transaction - * managers between our code and HBase. - */ -public interface HBaseConnection extends Configurable { - - /** - * Connects to HBase. This must be called after {@link #setConf} has been called. - * @throws IOException - */ - void connect() throws IOException; - - /** - * Close the connection. No further operations are possible after this is done. - * @throws IOException - */ - void close() throws IOException; - - /** - * Begin a transaction. - * @throws IOException - */ - void beginTransaction() throws IOException; - - /** - * Commit a transaction - * @throws IOException indicates the commit has failed - */ - void commitTransaction() throws IOException; - - /** - * Rollback a transaction - * @throws IOException - */ - void rollbackTransaction() throws IOException; - - /** - * Flush commits. A no-op for transaction implementations since they will write at commit time. - * @param htab Table to flush - * @throws IOException - */ - void flush(HTableInterface htab) throws IOException; - - /** - * Create a new table - * @param tableName name of the table - * @param columnFamilies name of the column families in the table - * @throws IOException - */ - void createHBaseTable(String tableName, List columnFamilies) throws IOException; - - /** - * Fetch an existing HBase table. - * @param tableName name of the table - * @return table handle - * @throws IOException - */ - HTableInterface getHBaseTable(String tableName) throws IOException; - - /** - * Fetch an existing HBase table and force a connection to it. This should be used only in - * cases where you want to assure that the table exists (ie at install). - * @param tableName name of the table - * @param force if true, force a connection by fetching a non-existant key - * @return table handle - * @throws IOException - */ - HTableInterface getHBaseTable(String tableName, boolean force) throws IOException; - -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java deleted file mode 100644 index 3c03846..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java +++ /dev/null @@ -1,612 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.IdentityHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hbase.filter.CompareFilter; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.filter.RowFilter; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator; -import org.apache.hadoop.hive.metastore.parser.ExpressionTree; -import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode; -import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode; -import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; - - -/** - * Utility function for generating hbase partition filtering plan representation - * from ExpressionTree. - * Optimizations to be done - - * - Case where all partition keys are specified. Should use a get - * - * {@link PartitionFilterGenerator} is a visitor on the given filter expression tree. After - * walking it it produces the HBase execution plan represented by {@link FilterPlan}. See - * their javadocs for more details. - */ -class HBaseFilterPlanUtil { - - /** - * Compare two byte arrays. - * - * @param ar1 - * first byte array - * @param ar2 - * second byte array - * @return -1 if ar1 < ar2, 0 if == , 1 if > - */ - static int compare(byte[] ar1, byte[] ar2) { - // null check is not needed, nulls are not passed here - for (int i = 0; i < ar1.length; i++) { - if (i == ar2.length) { - return 1; - } else { - if (ar1[i] == ar2[i]) { - continue; - } else if (ar1[i] > ar2[i]) { - return 1; - } else { - return -1; - } - } - } - // ar2 equal until length of ar1. - if(ar1.length == ar2.length) { - return 0; - } - // ar2 has more bytes - return -1; - } - - /** - * Represents the execution plan for hbase to find the set of partitions that - * match given filter expression. - * If you have an AND or OR of two expressions, you can determine FilterPlan for each - * children and then call lhs.and(rhs) or lhs.or(rhs) respectively - * to generate a new plan for the expression. - * - * The execution plan has one or more ScanPlan objects. To get the results the set union of all - * ScanPlan objects needs to be done. - */ - public static abstract class FilterPlan { - abstract FilterPlan and(FilterPlan other); - abstract FilterPlan or(FilterPlan other); - abstract List getPlans(); - @Override - public String toString() { - return getPlans().toString(); - } - - } - - /** - * Represents a union/OR of single scan plans (ScanPlan). 
- */ - public static class MultiScanPlan extends FilterPlan { - final ImmutableList scanPlans; - - public MultiScanPlan(List scanPlans){ - this.scanPlans = ImmutableList.copyOf(scanPlans); - } - - @Override - public FilterPlan and(FilterPlan other) { - // Convert to disjunctive normal form (DNF), ie OR of ANDs - // First get a new set of FilterPlans by doing an AND - // on each ScanPlan in this one with the other FilterPlan - List newFPlans = new ArrayList(); - for (ScanPlan splan : getPlans()) { - newFPlans.add(splan.and(other)); - } - //now combine scanPlans in multiple new FilterPlans into one - // MultiScanPlan - List newScanPlans = new ArrayList(); - for (FilterPlan fp : newFPlans) { - newScanPlans.addAll(fp.getPlans()); - } - return new MultiScanPlan(newScanPlans); - } - - @Override - public FilterPlan or(FilterPlan other) { - // just combine the ScanPlans - List newScanPlans = new ArrayList(this.getPlans()); - newScanPlans.addAll(other.getPlans()); - return new MultiScanPlan(newScanPlans); - } - - @Override - public List getPlans() { - return scanPlans; - } - } - - /** - * Represents a single Hbase Scan api call - */ - public static class ScanPlan extends FilterPlan { - - public static class ScanMarker { - final String value; - /** - * If inclusive = true, it means that the - * marker includes those bytes. - * If it is false, it means the marker starts at the next possible byte array - * or ends at the next possible byte array - */ - final boolean isInclusive; - final String type; - ScanMarker(String obj, boolean i, String type){ - this.value = obj; - this.isInclusive = i; - this.type = type; - } - @Override - public String toString() { - return "ScanMarker [" + "value=" + value.toString() + ", isInclusive=" + isInclusive + - ", type=" + type + "]"; - } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + value.hashCode(); - result = prime * result + (isInclusive ? 1231 : 1237); - result = prime * result + type.hashCode(); - return result; - } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - ScanMarker other = (ScanMarker) obj; - if (!value.equals(other.value)) - return false; - if (isInclusive != other.isInclusive) - return false; - if (type != other.type) - return false; - return true; - } - } - public static class ScanMarkerPair { - public ScanMarkerPair(ScanMarker startMarker, ScanMarker endMarker) { - this.startMarker = startMarker; - this.endMarker = endMarker; - } - ScanMarker startMarker; - ScanMarker endMarker; - } - // represent Scan start, partition key name -> scanMarkerPair - Map markers = new HashMap(); - List ops = new ArrayList(); - - // Get the number of partition key prefixes which can be used in the scan range. - // For example, if partition key is (year, month, state) - // 1. year = 2015 and month >= 1 and month < 5 - // year + month can be used in scan range, majorParts = 2 - // 2. year = 2015 and state = 'CA' - // only year can be used in scan range, majorParts = 1 - // 3. 
month = 10 and state = 'CA' - // nothing can be used in scan range, majorParts = 0 - private int getMajorPartsCount(List parts) { - int majorPartsCount = 0; - while (majorPartsCount parts) { - int majorPartsCount = getMajorPartsCount(parts); - Set majorKeys = new HashSet(); - for (int i=0;i names = HBaseUtils.getPartitionNames(parts); - List ranges = new ArrayList(); - for (Map.Entry entry : markers.entrySet()) { - if (names.contains(entry.getKey()) && !majorKeys.contains(entry.getKey())) { - PartitionKeyComparator.Mark startMark = null; - if (entry.getValue().startMarker != null) { - startMark = new PartitionKeyComparator.Mark(entry.getValue().startMarker.value, - entry.getValue().startMarker.isInclusive); - } - PartitionKeyComparator.Mark endMark = null; - if (entry.getValue().endMarker != null) { - startMark = new PartitionKeyComparator.Mark(entry.getValue().endMarker.value, - entry.getValue().endMarker.isInclusive); - } - PartitionKeyComparator.Range range = new PartitionKeyComparator.Range( - entry.getKey(), startMark, endMark); - ranges.add(range); - } - } - - if (ranges.isEmpty() && ops.isEmpty()) { - return null; - } else { - return new RowFilter(CompareFilter.CompareOp.EQUAL, new PartitionKeyComparator( - StringUtils.join(names, ","), StringUtils.join(HBaseUtils.getPartitionKeyTypes(parts), ","), - ranges, ops)); - } - } - - public void setStartMarker(String keyName, String keyType, String start, boolean isInclusive) { - if (markers.containsKey(keyName)) { - markers.get(keyName).startMarker = new ScanMarker(start, isInclusive, keyType); - } else { - ScanMarkerPair marker = new ScanMarkerPair(new ScanMarker(start, isInclusive, keyType), null); - markers.put(keyName, marker); - } - } - - public ScanMarker getStartMarker(String keyName) { - if (markers.containsKey(keyName)) { - return markers.get(keyName).startMarker; - } else { - return null; - } - } - - public void setEndMarker(String keyName, String keyType, String end, boolean isInclusive) { - if (markers.containsKey(keyName)) { - markers.get(keyName).endMarker = new ScanMarker(end, isInclusive, keyType); - } else { - ScanMarkerPair marker = new ScanMarkerPair(null, new ScanMarker(end, isInclusive, keyType)); - markers.put(keyName, marker); - } - } - - public ScanMarker getEndMarker(String keyName) { - if (markers.containsKey(keyName)) { - return markers.get(keyName).endMarker; - } else { - return null; - } - } - - @Override - public FilterPlan and(FilterPlan other) { - List newSPlans = new ArrayList(); - for (ScanPlan otherSPlan : other.getPlans()) { - newSPlans.add(this.and(otherSPlan)); - } - return new MultiScanPlan(newSPlans); - } - - private ScanPlan and(ScanPlan other) { - // create combined FilterPlan based on existing lhs and rhs plan - ScanPlan newPlan = new ScanPlan(); - newPlan.markers.putAll(markers); - - for (String keyName : other.markers.keySet()) { - if (newPlan.markers.containsKey(keyName)) { - // create new scan start - ScanMarker greaterStartMarker = getComparedMarker(this.getStartMarker(keyName), - other.getStartMarker(keyName), true); - if (greaterStartMarker != null) { - newPlan.setStartMarker(keyName, greaterStartMarker.type, greaterStartMarker.value, greaterStartMarker.isInclusive); - } - - // create new scan end - ScanMarker lesserEndMarker = getComparedMarker(this.getEndMarker(keyName), other.getEndMarker(keyName), - false); - if (lesserEndMarker != null) { - newPlan.setEndMarker(keyName, lesserEndMarker.type, lesserEndMarker.value, lesserEndMarker.isInclusive); - } - } else { - 
newPlan.markers.put(keyName, other.markers.get(keyName)); - } - } - - newPlan.ops.addAll(ops); - newPlan.ops.addAll(other.ops); - return newPlan; - } - - /** - * @param lStartMarker - * @param rStartMarker - * @param getGreater if true return greater startmarker, else return smaller one - * @return greater/lesser marker depending on value of getGreater - */ - @VisibleForTesting - static ScanMarker getComparedMarker(ScanMarker lStartMarker, ScanMarker rStartMarker, - boolean getGreater) { - // if one of them has null bytes, just return other - if(lStartMarker == null) { - return rStartMarker; - } else if (rStartMarker == null) { - return lStartMarker; - } - TypeInfo expectedType = - TypeInfoUtils.getTypeInfoFromTypeString(lStartMarker.type); - ObjectInspector outputOI = - TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType); - Converter lConverter = ObjectInspectorConverters.getConverter( - PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI); - Converter rConverter = ObjectInspectorConverters.getConverter( - PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI); - Comparable lValue = (Comparable)lConverter.convert(lStartMarker.value); - Comparable rValue = (Comparable)rConverter.convert(rStartMarker.value); - - int compareRes = lValue.compareTo(rValue); - if (compareRes == 0) { - // bytes are equal, now compare the isInclusive flags - if (lStartMarker.isInclusive == rStartMarker.isInclusive) { - // actually equal, so return any one - return lStartMarker; - } - boolean isInclusive = true; - // one that does not include the current bytes is greater - if (getGreater) { - isInclusive = false; - } - // else - return new ScanMarker(lStartMarker.value, isInclusive, lStartMarker.type); - } - if (getGreater) { - return compareRes == 1 ? lStartMarker : rStartMarker; - } - // else - return compareRes == -1 ? lStartMarker : rStartMarker; - } - - - @Override - public FilterPlan or(FilterPlan other) { - List plans = new ArrayList(getPlans()); - plans.addAll(other.getPlans()); - return new MultiScanPlan(plans); - } - - @Override - public List getPlans() { - return Arrays.asList(this); - } - - - /** - * @return row suffix - This is appended to db + table, to generate start row for the Scan - */ - public byte[] getStartRowSuffix(String dbName, String tableName, List parts) { - int majorPartsCount = getMajorPartsCount(parts); - List majorPartTypes = new ArrayList(); - List components = new ArrayList(); - boolean endPrefix = false; - for (int i=0;i parts) { - int majorPartsCount = getMajorPartsCount(parts); - List majorPartTypes = new ArrayList(); - List components = new ArrayList(); - boolean endPrefix = false; - for (int i=0;i entry : markers.entrySet()) { - sb.append("key=" + entry.getKey() + "[startMarker=" + entry.getValue().startMarker - + ", endMarker=" + entry.getValue().endMarker + "]"); - } - return sb.toString(); - } - - } - - /** - * Visitor for ExpressionTree. - * It first generates the ScanPlan for the leaf nodes. The higher level nodes are - * either AND or OR operations. It then calls FilterPlan.and and FilterPlan.or with - * the child nodes to generate the plans for higher level nodes. 
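A minimal sketch (same-package, since HBaseFilterPlanUtil and its nested ScanPlan/FilterPlan types are not public) of how the year/month example from the getMajorPartsCount comment composes into one plan; the class and method names below are illustrative only, not part of the original source:

package org.apache.hadoop.hive.metastore.hbase;

import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.FilterPlan;
import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan;

// Hypothetical helper, illustrative only.
class FilterPlanSketch {
  static FilterPlan yearAndMonthRange() {
    ScanPlan year = new ScanPlan();
    year.setStartMarker("year", "int", "2015", true);   // year = 2015: EQUALS sets both markers
    year.setEndMarker("year", "int", "2015", true);

    ScanPlan monthLow = new ScanPlan();
    monthLow.setStartMarker("month", "int", "1", true); // month >= 1

    ScanPlan monthHigh = new ScanPlan();
    monthHigh.setEndMarker("month", "int", "5", false); // month < 5

    // AND keeps the tighter marker for each key; OR would instead union the
    // ScanPlans into a MultiScanPlan whose results are combined by set union.
    return year.and(monthLow).and(monthHigh);
  }
}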
- */ - @VisibleForTesting - static class PartitionFilterGenerator extends TreeVisitor { - private FilterPlan curPlan; - - // this tells us if there is a condition that did not get included in the plan - // such condition would be treated as getting evaluated to TRUE - private boolean hasUnsupportedCondition = false; - - //Need to cache the left plans for the TreeNode. Use IdentityHashMap here - // as we don't want to dedupe on two TreeNode that are otherwise considered equal - Map leftPlans = new IdentityHashMap(); - - // temporary params for current left and right side plans, for AND, OR - private FilterPlan rPlan; - - private Map nameToType = new HashMap(); - - public PartitionFilterGenerator(List parts) { - for (FieldSchema part : parts) { - nameToType.put(part.getName(), part.getType()); - } - } - - FilterPlan getPlan() { - return curPlan; - } - - @Override - protected void beginTreeNode(TreeNode node) throws MetaException { - // reset the params - curPlan = rPlan = null; - } - - @Override - protected void midTreeNode(TreeNode node) throws MetaException { - leftPlans.put(node, curPlan); - curPlan = null; - } - - @Override - protected void endTreeNode(TreeNode node) throws MetaException { - rPlan = curPlan; - FilterPlan lPlan = leftPlans.get(node); - leftPlans.remove(node); - - switch (node.getAndOr()) { - case AND: - curPlan = lPlan.and(rPlan); - break; - case OR: - curPlan = lPlan.or(rPlan); - break; - default: - throw new AssertionError("Unexpected logical operation " + node.getAndOr()); - } - - } - - - @Override - public void visit(LeafNode node) throws MetaException { - ScanPlan leafPlan = new ScanPlan(); - curPlan = leafPlan; - - // this is a condition on first partition column, so might influence the - // start and end of the scan - final boolean INCLUSIVE = true; - switch (node.operator) { - case EQUALS: - leafPlan.setStartMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), INCLUSIVE); - leafPlan.setEndMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), INCLUSIVE); - break; - case GREATERTHAN: - leafPlan.setStartMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), !INCLUSIVE); - break; - case GREATERTHANOREQUALTO: - leafPlan.setStartMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), INCLUSIVE); - break; - case LESSTHAN: - leafPlan.setEndMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), !INCLUSIVE); - break; - case LESSTHANOREQUALTO: - leafPlan.setEndMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), INCLUSIVE); - break; - case LIKE: - leafPlan.ops.add(new Operator(Operator.Type.LIKE, node.keyName, node.value.toString())); - break; - case NOTEQUALS: - case NOTEQUALS2: - leafPlan.ops.add(new Operator(Operator.Type.NOTEQUALS, node.keyName, node.value.toString())); - break; - } - } - - private boolean hasUnsupportedCondition() { - return hasUnsupportedCondition; - } - - } - - public static class PlanResult { - public final FilterPlan plan; - public final boolean hasUnsupportedCondition; - PlanResult(FilterPlan plan, boolean hasUnsupportedCondition) { - this.plan = plan; - this.hasUnsupportedCondition = hasUnsupportedCondition; - } - } - - public static PlanResult getFilterPlan(ExpressionTree exprTree, List parts) throws MetaException { - if (exprTree == null) { - // TODO: if exprTree is null, we should do what ObjectStore does. 
See HIVE-10102 - return new PlanResult(new ScanPlan(), true); - } - PartitionFilterGenerator pGenerator = new PartitionFilterGenerator(parts); - exprTree.accept(pGenerator); - return new PlanResult(pGenerator.getPlan(), pGenerator.hasUnsupportedCondition()); - } - -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java deleted file mode 100644 index 5f89769..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java +++ /dev/null @@ -1,619 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import com.google.common.annotations.VisibleForTesting; - -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.GnuParser; -import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.cli.OptionBuilder; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.Deadline; -import org.apache.hadoop.hive.metastore.ObjectStore; -import org.apache.hadoop.hive.metastore.RawStore; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.Table; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; - -/** - * A tool to take the contents of an RDBMS based Hive metastore and import it into an HBase based - * one. To use this the config files for Hive configured to work with the RDBMS (that is, - * including the JDBC string, etc.) as well as HBase configuration files must be in the path. - * There should not be a hive-site.xml that specifies HBaseStore in the path. 
This tool will then - * handle connecting to the RDBMS via the {@link org.apache.hadoop.hive.metastore.ObjectStore} - * and HBase via {@link org.apache.hadoop.hive.metastore.hbase.HBaseStore} and transferring the - * data. - * - * This tool can import an entire metastore or only selected objects. When selecting objects it - * is necessary to fully specify the object's name. For example, if you want to import the table - * T in the default database it needs to be identified as default.T. The same is true for - * functions. When an object is specified, everything under that object will be imported (e.g. - * if you select database D, then all tables and functions in that database will be - * imported as well). - * - * At this point only tables and partitions are handled in parallel as it is assumed there are - * relatively few of everything else. - * - * Note that HBaseSchemaTool must have already been used to create the appropriate tables in HBase. - */ -public class HBaseImport { - - static final private Logger LOG = LoggerFactory.getLogger(HBaseImport.class.getName()); - - public static int main(String[] args) { - try { - HBaseImport tool = new HBaseImport(); - int rv = tool.init(args); - if (rv != 0) return rv; - tool.run(); - } catch (Exception e) { - System.err.println("Caught exception " + e.getClass().getName() + " with message <" + - e.getMessage() + ">"); - return 1; - } - return 0; - } - - private ThreadLocal rdbmsStore = new ThreadLocal() { - @Override - protected RawStore initialValue() { - if (rdbmsConf == null) { - throw new RuntimeException("order violation, need to set rdbms conf first"); - } - RawStore os = new ObjectStore(); - os.setConf(rdbmsConf); - return os; - } - }; - - private ThreadLocal hbaseStore = new ThreadLocal() { - @Override - protected RawStore initialValue() { - if (hbaseConf == null) { - throw new RuntimeException("order violation, need to set hbase conf first"); - } - RawStore hs = new HBaseStore(); - hs.setConf(hbaseConf); - return hs; - } - }; - - private Configuration rdbmsConf; - private Configuration hbaseConf; - private List dbs; - private BlockingQueue
partitionedTables; - private BlockingQueue tableNameQueue; - private BlockingQueue indexNameQueue; - private BlockingQueue partQueue; - private boolean writingToQueue, readersFinished; - private boolean doKerberos, doAll; - private List rolesToImport, dbsToImport, tablesToImport, functionsToImport; - private int parallel; - private int batchSize; - - private HBaseImport() {} - - @VisibleForTesting - public HBaseImport(String... args) throws ParseException { - init(args); - } - - private int init(String... args) throws ParseException { - Options options = new Options(); - - doAll = doKerberos = false; - parallel = 1; - batchSize = 1000; - - options.addOption(OptionBuilder - .withLongOpt("all") - .withDescription("Import the full metastore") - .create('a')); - - options.addOption(OptionBuilder - .withLongOpt("batchsize") - .withDescription("Number of partitions to read and write in a batch, defaults to 1000") - .hasArg() - .create('b')); - - options.addOption(OptionBuilder - .withLongOpt("database") - .withDescription("Import a single database") - .hasArgs() - .create('d')); - - options.addOption(OptionBuilder - .withLongOpt("help") - .withDescription("You're looking at it") - .create('h')); - - options.addOption(OptionBuilder - .withLongOpt("function") - .withDescription("Import a single function") - .hasArgs() - .create('f')); - - options.addOption(OptionBuilder - .withLongOpt("kerberos") - .withDescription("Import all kerberos related objects (master key, tokens)") - .create('k')); - - options.addOption(OptionBuilder - .withLongOpt("parallel") - .withDescription("Parallel factor for loading (only applied to tables and partitions), " + - "defaults to 1") - .hasArg() - .create('p')); - - options.addOption(OptionBuilder - .withLongOpt("role") - .withDescription("Import a single role") - .hasArgs() - .create('r')); - - options.addOption(OptionBuilder - .withLongOpt("tables") - .withDescription("Import a single tables") - .hasArgs() - .create('t')); - - CommandLine cli = new GnuParser().parse(options, args); - - // Process help, if it was asked for, this must be done first - if (cli.hasOption('h')) { - printHelp(options); - return 1; - } - - boolean hasCmd = false; - // Now process the other command line args - if (cli.hasOption('a')) { - hasCmd = true; - doAll = true; - } - if (cli.hasOption('b')) { - batchSize = Integer.parseInt(cli.getOptionValue('b')); - } - if (cli.hasOption('d')) { - hasCmd = true; - dbsToImport = Arrays.asList(cli.getOptionValues('d')); - } - if (cli.hasOption('f')) { - hasCmd = true; - functionsToImport = Arrays.asList(cli.getOptionValues('f')); - } - if (cli.hasOption('p')) { - parallel = Integer.parseInt(cli.getOptionValue('p')); - } - if (cli.hasOption('r')) { - hasCmd = true; - rolesToImport = Arrays.asList(cli.getOptionValues('r')); - } - if (cli.hasOption('k')) { - doKerberos = true; - } - if (cli.hasOption('t')) { - hasCmd = true; - tablesToImport = Arrays.asList(cli.getOptionValues('t')); - } - if (!hasCmd) { - printHelp(options); - return 1; - } - - dbs = new ArrayList<>(); - // We don't want to bound the size of the table queue because we keep it all in memory - partitionedTables = new LinkedBlockingQueue<>(); - tableNameQueue = new LinkedBlockingQueue<>(); - indexNameQueue = new LinkedBlockingQueue<>(); - - // Bound the size of this queue so we don't get too much in memory. 
- partQueue = new ArrayBlockingQueue<>(parallel * 2); - return 0; - } - - private void printHelp(Options options) { - (new HelpFormatter()).printHelp("hbaseschematool", options); - } - - @VisibleForTesting - void run() throws MetaException, InstantiationException, IllegalAccessException, - NoSuchObjectException, InvalidObjectException, InterruptedException { - // Order here is crucial, as you can't add tables until you've added databases, etc. - init(); - if (doAll || rolesToImport != null) { - copyRoles(); - } - if (doAll || dbsToImport != null) { - copyDbs(); - } - if (doAll || dbsToImport != null || tablesToImport != null) { - copyTables(); - copyPartitions(); - copyIndexes(); - } - if (doAll || dbsToImport != null || functionsToImport != null) { - copyFunctions(); - } - if (doAll || doKerberos) { - copyKerberos(); - } - } - - private void init() throws MetaException, IllegalAccessException, InstantiationException { - if (rdbmsConf != null) { - // We've been configured for testing, so don't do anything here. - return; - } - // We're depending on having everything properly in the path - rdbmsConf = new HiveConf(); - hbaseConf = new HiveConf();// - HiveConf.setVar(hbaseConf, HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, - HBaseStore.class.getName()); - HiveConf.setBoolVar(hbaseConf, HiveConf.ConfVars.METASTORE_FASTPATH, true); - - // First get a connection to the RDBMS based store - rdbmsStore.get().setConf(rdbmsConf); - - // Get a connection to the HBase based store - hbaseStore.get().setConf(hbaseConf); - } - - private void copyRoles() throws NoSuchObjectException, InvalidObjectException, MetaException { - screen("Copying roles"); - List toCopy = doAll ? rdbmsStore.get().listRoleNames() : rolesToImport; - for (String roleName : toCopy) { - Role role = rdbmsStore.get().getRole(roleName); - screen("Copying role " + roleName); - hbaseStore.get().addRole(roleName, role.getOwnerName()); - } - } - - private void copyDbs() throws MetaException, NoSuchObjectException, InvalidObjectException { - screen("Copying databases"); - List toCopy = doAll ? rdbmsStore.get().getAllDatabases() : dbsToImport; - for (String dbName : toCopy) { - Database db = rdbmsStore.get().getDatabase(dbName); - dbs.add(db); - screen("Copying database " + dbName); - hbaseStore.get().createDatabase(db); - } - } - - private void copyTables() throws MetaException, InvalidObjectException, InterruptedException { - screen("Copying tables"); - - // Start the parallel threads that will copy the tables - Thread[] copiers = new Thread[parallel]; - writingToQueue = true; - for (int i = 0; i < parallel; i++) { - copiers[i] = new TableCopier(); - copiers[i].start(); - } - - // Put tables from the databases we copied into the queue - for (Database db : dbs) { - screen("Coyping tables in database " + db.getName()); - for (String tableName : rdbmsStore.get().getAllTables(db.getName())) { - tableNameQueue.put(new String[]{db.getName(), tableName}); - } - } - - // Now put any specifically requested tables into the queue - if (tablesToImport != null) { - for (String compoundTableName : tablesToImport) { - String[] tn = compoundTableName.split("\\."); - if (tn.length != 2) { - error(compoundTableName + " not in proper form. Must be in form dbname.tablename. 
" + - "Ignoring this table and continuing."); - } else { - tableNameQueue.put(new String[]{tn[0], tn[1]}); - } - } - } - writingToQueue = false; - - // Wait until we've finished adding all the tables - for (Thread copier : copiers) copier.join(); - } - - private class TableCopier extends Thread { - @Override - public void run() { - while (writingToQueue || tableNameQueue.size() > 0) { - try { - String[] name = tableNameQueue.poll(1, TimeUnit.SECONDS); - if (name != null) { - Table table = rdbmsStore.get().getTable(name[0], name[1]); - // If this has partitions, put it in the list to fetch partions for - if (table.getPartitionKeys() != null && table.getPartitionKeys().size() > 0) { - partitionedTables.put(table); - } - screen("Copying table " + name[0] + "." + name[1]); - hbaseStore.get().createTable(table); - - // See if the table has any constraints, and if so copy those as well - List pk = - rdbmsStore.get().getPrimaryKeys(table.getDbName(), table.getTableName()); - if (pk != null && pk.size() > 0) { - LOG.debug("Found primary keys, adding them"); - hbaseStore.get().addPrimaryKeys(pk); - } - - // Passing null as the target table name results in all of the foreign keys being - // retrieved. - List fks = - rdbmsStore.get().getForeignKeys(null, null, table.getDbName(), table.getTableName()); - if (fks != null && fks.size() > 0) { - LOG.debug("Found foreign keys, adding them"); - hbaseStore.get().addForeignKeys(fks); - } - } - } catch (InterruptedException | MetaException | InvalidObjectException e) { - throw new RuntimeException(e); - } - } - } - } - - private void copyIndexes() throws MetaException, InvalidObjectException, InterruptedException { - screen("Copying indexes"); - - // Start the parallel threads that will copy the indexes - Thread[] copiers = new Thread[parallel]; - writingToQueue = true; - for (int i = 0; i < parallel; i++) { - copiers[i] = new IndexCopier(); - copiers[i].start(); - } - - // Put indexes from the databases we copied into the queue - for (Database db : dbs) { - screen("Coyping indexes in database " + db.getName()); - for (String tableName : rdbmsStore.get().getAllTables(db.getName())) { - for (Index index : rdbmsStore.get().getIndexes(db.getName(), tableName, -1)) { - indexNameQueue.put(new String[]{db.getName(), tableName, index.getIndexName()}); - } - } - } - - // Now put any specifically requested tables into the queue - if (tablesToImport != null) { - for (String compoundTableName : tablesToImport) { - String[] tn = compoundTableName.split("\\."); - if (tn.length != 2) { - error(compoundTableName + " not in proper form. Must be in form dbname.tablename. " + - "Ignoring this table and continuing."); - } else { - for (Index index : rdbmsStore.get().getIndexes(tn[0], tn[1], -1)) { - indexNameQueue.put(new String[]{tn[0], tn[1], index.getIndexName()}); - } - } - } - } - - writingToQueue = false; - - // Wait until we've finished adding all the tables - for (Thread copier : copiers) copier.join(); - } - - private class IndexCopier extends Thread { - @Override - public void run() { - while (writingToQueue || indexNameQueue.size() > 0) { - try { - String[] name = indexNameQueue.poll(1, TimeUnit.SECONDS); - if (name != null) { - Index index = rdbmsStore.get().getIndex(name[0], name[1], name[2]); - screen("Copying index " + name[0] + "." + name[1] + "." 
+ name[2]); - hbaseStore.get().addIndex(index); - } - } catch (InterruptedException | MetaException | InvalidObjectException e) { - throw new RuntimeException(e); - } - } - } - } - - /* Partition copying is a little complex. As we went through and copied the tables we put each - * partitioned table into a queue. We will now go through that queue and add partitions for the - * tables. We do the finding of partitions and writing of them separately and in parallel. - * This way if there is one table with >> partitions then all of the others that skew won't - * hurt us. To avoid pulling all of the partitions for a table into memory, we batch up - * partitions (by default in batches of 1000) and copy them over in batches. - */ - private void copyPartitions() throws MetaException, NoSuchObjectException, - InvalidObjectException, InterruptedException { - screen("Copying partitions"); - readersFinished = false; - Thread[] readers = new Thread[parallel]; - Thread[] writers = new Thread[parallel]; - for (int i = 0; i < parallel; i++) { - readers[i] = new PartitionReader(); - readers[i].start(); - writers[i] = new PartitionWriter(); - writers[i].start(); - } - - for (Thread reader : readers) reader.join(); - readersFinished = true; - - // Wait until we've finished adding all the partitions - for (Thread writer : writers) writer.join(); - } - - private class PartitionReader extends Thread { - @Override - public void run() { - while (partitionedTables.size() > 0) { - try { - Table table = partitionedTables.poll(1, TimeUnit.SECONDS); - if (table != null) { - screen("Fetching partitions for table " + table.getDbName() + "." + - table.getTableName()); - List partNames = - rdbmsStore.get().listPartitionNames(table.getDbName(), table.getTableName(), - (short) -1); - if (partNames.size() <= batchSize) { - LOG.debug("Adding all partition names to queue for " + table.getDbName() + "." + - table.getTableName()); - partQueue.put(new PartQueueEntry(table.getDbName(), table.getTableName(), partNames)); - } else { - int goUntil = partNames.size() % batchSize == 0 ? partNames.size() / batchSize : - partNames.size() / batchSize + 1; - for (int i = 0; i < goUntil; i++) { - int start = i * batchSize; - int end = Math.min((i + 1) * batchSize, partNames.size()); - LOG.debug("Adding partitions " + start + " to " + end + " for " + table.getDbName() - + "." + table.getTableName()); - partQueue.put(new PartQueueEntry(table.getDbName(), table.getTableName(), - partNames.subList(start, end))); - } - } - } - } catch (InterruptedException | MetaException e) { - throw new RuntimeException(e); - } - } - } - } - - private class PartitionWriter extends Thread { - @Override - public void run() { - // This keeps us from throwing exceptions in our raw store calls - Deadline.registerIfNot(1000000); - while (!readersFinished || partQueue.size() > 0) { - try { - PartQueueEntry entry = partQueue.poll(1, TimeUnit.SECONDS); - if (entry != null) { - LOG.info("Writing partitions " + entry.dbName + "." + entry.tableName + "." 
+ - StringUtils.join(entry.partNames, ':')); - // Fetch these partitions and write them to HBase - Deadline.startTimer("hbaseimport"); - List parts = - rdbmsStore.get().getPartitionsByNames(entry.dbName, entry.tableName, - entry.partNames); - hbaseStore.get().addPartitions(entry.dbName, entry.tableName, parts); - Deadline.stopTimer(); - } - } catch (InterruptedException | MetaException | InvalidObjectException | - NoSuchObjectException e) { - throw new RuntimeException(e); - } - } - } - } - - private void copyFunctions() throws MetaException, NoSuchObjectException, InvalidObjectException { - screen("Copying functions"); - // Copy any functions from databases we copied. - for (Database db : dbs) { - screen("Copying functions in database " + db.getName()); - for (String funcName : rdbmsStore.get().getFunctions(db.getName(), "*")) { - copyOneFunction(db.getName(), funcName); - } - } - // Now do any specifically requested functions - if (functionsToImport != null) { - for (String compoundFuncName : functionsToImport) { - String[] fn = compoundFuncName.split("\\."); - if (fn.length != 2) { - error(compoundFuncName + " not in proper form. Must be in form dbname.funcname. " + - "Ignoring this function and continuing."); - } else { - copyOneFunction(fn[0], fn[1]); - } - } - } - } - - private void copyOneFunction(String dbName, String funcName) throws MetaException, - InvalidObjectException { - Function func = rdbmsStore.get().getFunction(dbName, funcName); - screen("Copying function " + dbName + "." + funcName); - hbaseStore.get().createFunction(func); - } - - private void copyKerberos() throws MetaException { - screen("Copying kerberos related items"); - for (String tokenId : rdbmsStore.get().getAllTokenIdentifiers()) { - String token = rdbmsStore.get().getToken(tokenId); - hbaseStore.get().addToken(tokenId, token); - } - for (String masterKey : rdbmsStore.get().getMasterKeys()) { - hbaseStore.get().addMasterKey(masterKey); - } - } - - private void screen(String msg) { - LOG.info(msg); - System.out.println(msg); - } - - private void error(String msg) { - LOG.error(msg); - System.err.println("ERROR: " + msg); - } - - @VisibleForTesting - void setConnections(RawStore rdbms, RawStore hbase) { - rdbmsStore.set(rdbms); - hbaseStore.set(hbase); - rdbmsConf = rdbms.getConf(); - hbaseConf = hbase.getConf(); - } - - private static class PartQueueEntry { - final String dbName; - final String tableName; - final List partNames; - - PartQueueEntry(String d, String t, List p) { - dbName = d; - tableName = t; - partNames = p; - } - } - -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java deleted file mode 100644 index ab6457e..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java +++ /dev/null @@ -1,2942 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Iterators; -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Row; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.filter.CompareFilter; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.filter.RegexStringComparator; -import org.apache.hadoop.hbase.filter.RowFilter; -import org.apache.hadoop.hive.common.ObjectPair; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator; -import org.apache.hive.common.util.BloomFilter; -import org.apache.thrift.TBase; -import org.apache.thrift.TException; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.protocol.TSimpleJSONProtocol; -import org.apache.thrift.transport.TMemoryBuffer; - -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.nio.ByteBuffer; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; 
-import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Set; - - -/** - * Class to manage storing object in and reading them from HBase. - */ -public class HBaseReadWrite implements MetadataStore { - - final static String AGGR_STATS_TABLE = "HBMS_AGGR_STATS"; - final static String DB_TABLE = "HBMS_DBS"; - final static String FUNC_TABLE = "HBMS_FUNCS"; - final static String GLOBAL_PRIVS_TABLE = "HBMS_GLOBAL_PRIVS"; - final static String PART_TABLE = "HBMS_PARTITIONS"; - final static String ROLE_TABLE = "HBMS_ROLES"; - final static String SD_TABLE = "HBMS_SDS"; - final static String SECURITY_TABLE = "HBMS_SECURITY"; - final static String SEQUENCES_TABLE = "HBMS_SEQUENCES"; - final static String TABLE_TABLE = "HBMS_TBLS"; - final static String INDEX_TABLE = "HBMS_INDEX"; - final static String USER_TO_ROLE_TABLE = "HBMS_USER_TO_ROLE"; - final static String FILE_METADATA_TABLE = "HBMS_FILE_METADATA"; - final static byte[] CATALOG_CF = "c".getBytes(HBaseUtils.ENCODING); - final static byte[] STATS_CF = "s".getBytes(HBaseUtils.ENCODING); - final static String NO_CACHE_CONF = "no.use.cache"; - /** - * List of tables in HBase - */ - public final static String[] tableNames = { AGGR_STATS_TABLE, DB_TABLE, FUNC_TABLE, - GLOBAL_PRIVS_TABLE, PART_TABLE, USER_TO_ROLE_TABLE, - ROLE_TABLE, SD_TABLE, SECURITY_TABLE, SEQUENCES_TABLE, - TABLE_TABLE, INDEX_TABLE, FILE_METADATA_TABLE }; - public final static Map> columnFamilies = new HashMap<> (tableNames.length); - - static { - columnFamilies.put(AGGR_STATS_TABLE, Arrays.asList(CATALOG_CF)); - columnFamilies.put(DB_TABLE, Arrays.asList(CATALOG_CF)); - columnFamilies.put(FUNC_TABLE, Arrays.asList(CATALOG_CF)); - columnFamilies.put(GLOBAL_PRIVS_TABLE, Arrays.asList(CATALOG_CF)); - columnFamilies.put(PART_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); - columnFamilies.put(USER_TO_ROLE_TABLE, Arrays.asList(CATALOG_CF)); - columnFamilies.put(ROLE_TABLE, Arrays.asList(CATALOG_CF)); - columnFamilies.put(SD_TABLE, Arrays.asList(CATALOG_CF)); - columnFamilies.put(SECURITY_TABLE, Arrays.asList(CATALOG_CF)); - columnFamilies.put(SEQUENCES_TABLE, Arrays.asList(CATALOG_CF)); - columnFamilies.put(TABLE_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); - columnFamilies.put(INDEX_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); - // Stats CF will contain PPD stats. - columnFamilies.put(FILE_METADATA_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); - } - - final static byte[] MASTER_KEY_SEQUENCE = "master_key".getBytes(HBaseUtils.ENCODING); - // The change version functionality uses the sequences table, but we don't want to give the - // caller complete control over the sequence name as they might inadvertently clash with one of - // our sequence keys, so add a prefix to their topic name. 
- - final static byte[] AGGR_STATS_BLOOM_COL = "b".getBytes(HBaseUtils.ENCODING); - private final static byte[] AGGR_STATS_STATS_COL = "s".getBytes(HBaseUtils.ENCODING); - private final static byte[] CATALOG_COL = "c".getBytes(HBaseUtils.ENCODING); - private final static byte[] ROLES_COL = "roles".getBytes(HBaseUtils.ENCODING); - private final static byte[] REF_COUNT_COL = "ref".getBytes(HBaseUtils.ENCODING); - private final static byte[] DELEGATION_TOKEN_COL = "dt".getBytes(HBaseUtils.ENCODING); - private final static byte[] MASTER_KEY_COL = "mk".getBytes(HBaseUtils.ENCODING); - private final static byte[] PRIMARY_KEY_COL = "pk".getBytes(HBaseUtils.ENCODING); - private final static byte[] FOREIGN_KEY_COL = "fk".getBytes(HBaseUtils.ENCODING); - private final static byte[] UNIQUE_CONSTRAINT_COL = "uk".getBytes(HBaseUtils.ENCODING); - private final static byte[] NOT_NULL_CONSTRAINT_COL = "nn".getBytes(HBaseUtils.ENCODING); - private final static byte[] GLOBAL_PRIVS_KEY = "gp".getBytes(HBaseUtils.ENCODING); - private final static byte[] SEQUENCES_KEY = "seq".getBytes(HBaseUtils.ENCODING); - private final static int TABLES_TO_CACHE = 10; - // False positives are very bad here because they cause us to invalidate entries we shouldn't. - // Space used and # of hash functions grows in proportion to ln of num bits so a 10x increase - // in accuracy doubles the required space and number of hash functions. - private final static double STATS_BF_ERROR_RATE = 0.001; - - @VisibleForTesting final static String TEST_CONN = "test_connection"; - private static HBaseConnection testConn; - - static final private Logger LOG = LoggerFactory.getLogger(HBaseReadWrite.class.getName()); - - private static ThreadLocal self = new ThreadLocal() { - @Override - protected HBaseReadWrite initialValue() { - if (staticConf == null) { - throw new RuntimeException("Attempt to create HBaseReadWrite with no configuration set"); - } - return new HBaseReadWrite(staticConf); - } - }; - - private static boolean tablesCreated = false; - private static Configuration staticConf = null; - - private final Configuration conf; - private HBaseConnection conn; - private MessageDigest md; - private ObjectCache, Table> tableCache; - private ObjectCache sdCache; - private PartitionCache partCache; - private StatsCache statsCache; - private Counter tableHits; - private Counter tableMisses; - private Counter tableOverflows; - private Counter partHits; - private Counter partMisses; - private Counter partOverflows; - private Counter sdHits; - private Counter sdMisses; - private Counter sdOverflows; - private List counters; - // roleCache doesn't use ObjectCache because I don't want to limit the size. I am assuming - // that the number of roles will always be small (< 100) so caching the whole thing should not - // be painful. - private final Map roleCache; - boolean entireRoleTableInCache; - - /** - * Set the configuration for all HBaseReadWrite instances. - * @param configuration Configuration object - */ - public static synchronized void setConf(Configuration configuration) { - if (staticConf == null) { - staticConf = configuration; - } else { - LOG.info("Attempt to set conf when it has already been set."); - } - } - - /** - * Get the instance of HBaseReadWrite for the current thread. This can only be called after - * {@link #setConf} has been called. Woe betide you if that's not the case. 
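A minimal usage sketch, assuming same-package access because getInstance is package-private; the configuration must be set once before any thread asks for its instance, and the sketch class name is illustrative only:

package org.apache.hadoop.hive.metastore.hbase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;

// Hypothetical caller, illustrative only.
class HBaseReadWriteUsageSketch {
  static HBaseReadWrite openForThisThread() {
    Configuration conf = new HiveConf();  // hive-site.xml/hbase-site.xml expected on the classpath
    HBaseReadWrite.setConf(conf);         // first call wins; later calls are logged and ignored
    return HBaseReadWrite.getInstance();  // per-thread instance backed by a ThreadLocal
  }
}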
- * @return thread's instance of HBaseReadWrite - */ - static HBaseReadWrite getInstance() { - if (staticConf == null) { - throw new RuntimeException("Must set conf object before getting an instance"); - } - return self.get(); - } - - public Configuration getConf() { - return conf; - } - - private HBaseReadWrite(Configuration configuration) { - conf = configuration; - HBaseConfiguration.addHbaseResources(conf); - - try { - String connClass = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS); - if (TEST_CONN.equals(connClass)) { - conn = testConn; - LOG.debug("Using test connection."); - } else { - LOG.debug("Instantiating connection class " + connClass); - Class c = Class.forName(connClass); - Object o = c.newInstance(); - if (HBaseConnection.class.isAssignableFrom(o.getClass())) { - conn = (HBaseConnection) o; - } else { - throw new IOException(connClass + " is not an instance of HBaseConnection."); - } - conn.setConf(conf); - conn.connect(); - } - } catch (Exception e) { - throw new RuntimeException(e); - } - - try { - md = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } - int totalCatalogObjectsToCache = - HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_HBASE_CATALOG_CACHE_SIZE); - - tableHits = new Counter("table cache hits"); - tableMisses = new Counter("table cache misses"); - tableOverflows = new Counter("table cache overflows"); - partHits = new Counter("partition cache hits"); - partMisses = new Counter("partition cache misses"); - partOverflows = new Counter("partition cache overflows"); - sdHits = new Counter("storage descriptor cache hits"); - sdMisses = new Counter("storage descriptor cache misses"); - sdOverflows = new Counter("storage descriptor cache overflows"); - counters = new ArrayList<>(); - counters.add(tableHits); - counters.add(tableMisses); - counters.add(tableOverflows); - counters.add(partHits); - counters.add(partMisses); - counters.add(partOverflows); - counters.add(sdHits); - counters.add(sdMisses); - counters.add(sdOverflows); - - // Give 1% of catalog cache space to storage descriptors - // (storage descriptors are shared, so 99% should be the same for a given table) - int sdsCacheSize = totalCatalogObjectsToCache / 100; - if (conf.getBoolean(NO_CACHE_CONF, false)) { - tableCache = new BogusObjectCache<>(); - sdCache = new BogusObjectCache<>(); - partCache = new BogusPartitionCache(); - } else { - tableCache = new ObjectCache<>(TABLES_TO_CACHE, tableHits, tableMisses, tableOverflows); - sdCache = new ObjectCache<>(sdsCacheSize, sdHits, sdMisses, sdOverflows); - partCache = new PartitionCache(totalCatalogObjectsToCache, partHits, partMisses, partOverflows); - } - statsCache = StatsCache.getInstance(conf); - roleCache = new HashMap<>(); - entireRoleTableInCache = false; - } - - // Synchronize this so not everyone's doing it at once. 
- static synchronized void createTablesIfNotExist() throws IOException { - if (!tablesCreated) { - for (String name : tableNames) { - if (self.get().conn.getHBaseTable(name, true) == null) { - List families = columnFamilies.get(name); - self.get().conn.createHBaseTable(name, families); - } - } - tablesCreated = true; - } - } - - /********************************************************************************************** - * Transaction related methods - *********************************************************************************************/ - - /** - * Begin a transaction - */ - void begin() { - try { - conn.beginTransaction(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - /** - * Commit a transaction - */ - void commit() { - try { - conn.commitTransaction(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - void rollback() { - try { - conn.rollbackTransaction(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - void close() throws IOException { - conn.close(); - } - - /********************************************************************************************** - * Database related methods - *********************************************************************************************/ - - /** - * Fetch a database object - * @param name name of the database to fetch - * @return the database object, or null if there is no such database - * @throws IOException - */ - Database getDb(String name) throws IOException { - byte[] key = HBaseUtils.buildKey(name); - byte[] serialized = read(DB_TABLE, key, CATALOG_CF, CATALOG_COL); - if (serialized == null) return null; - return HBaseUtils.deserializeDatabase(name, serialized); - } - - /** - * Get a list of databases. - * @param regex Regular expression to use in searching for database names. It is expected to - * be a Java regular expression. If it is null then all databases will be returned. - * @return list of databases matching the regular expression. - * @throws IOException - */ - List scanDatabases(String regex) throws IOException { - Filter filter = null; - if (regex != null) { - filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex)); - } - Iterator iter = - scan(DB_TABLE, CATALOG_CF, CATALOG_COL, filter); - List databases = new ArrayList<>(); - while (iter.hasNext()) { - Result result = iter.next(); - databases.add(HBaseUtils.deserializeDatabase(result.getRow(), - result.getValue(CATALOG_CF, CATALOG_COL))); - } - return databases; - } - - /** - * Store a database object - * @param database database object to store - * @throws IOException - */ - void putDb(Database database) throws IOException { - byte[][] serialized = HBaseUtils.serializeDatabase(database); - store(DB_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); - } - - /** - * Drop a database - * @param name name of db to drop - * @throws IOException - */ - void deleteDb(String name) throws IOException { - byte[] key = HBaseUtils.buildKey(name); - delete(DB_TABLE, key, null, null); - } - - /** - * Print out the database. Intended for use by {@link org.apache.hadoop.hive.metastore.hbase.HBaseSchemaTool} - * @param name name of database to print - * @return string printout of database - */ - String printDatabase(String name) throws IOException, TException { - Database db = getDb(name); - if (db == null) return noSuch(name, "database"); - else return dumpThriftObject(db); - } - - /** - * Print out databases. 
- * @param regex regular to use to search for databases - * @return databases as a string, one each - * @throws IOException - * @throws TException - */ - List printDatabases(String regex) throws IOException, TException { - List dbs = scanDatabases(regex); - if (dbs.size() == 0) { - return noMatch(regex, "database"); - } else { - List lines = new ArrayList<>(); - for (Database db : dbs) lines.add(dumpThriftObject(db)); - return lines; - } - } - - int getDatabaseCount() throws IOException { - Filter fil = new FirstKeyOnlyFilter(); - Iterator iter = scan(DB_TABLE, fil); - return Iterators.size(iter); - } - - /********************************************************************************************** - * Function related methods - *********************************************************************************************/ - - /** - * Fetch a function object - * @param dbName name of the database the function is in - * @param functionName name of the function to fetch - * @return the function object, or null if there is no such function - * @throws IOException - */ - Function getFunction(String dbName, String functionName) throws IOException { - byte[] key = HBaseUtils.buildKey(dbName, functionName); - byte[] serialized = read(FUNC_TABLE, key, CATALOG_CF, CATALOG_COL); - if (serialized == null) return null; - return HBaseUtils.deserializeFunction(dbName, functionName, serialized); - } - - /** - * Get a list of functions. - * @param dbName Name of the database to search in. - * @param regex Regular expression to use in searching for function names. It is expected to - * be a Java regular expression. If it is null then all functions will be returned. - * @return list of functions matching the regular expression. - * @throws IOException - */ - List scanFunctions(String dbName, String regex) throws IOException { - byte[] keyPrefix = null; - if (dbName != null) { - keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName); - } - Filter filter = null; - if (regex != null) { - filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex)); - } - Iterator iter = - scan(FUNC_TABLE, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), CATALOG_CF, CATALOG_COL, filter); - List functions = new ArrayList<>(); - while (iter.hasNext()) { - Result result = iter.next(); - functions.add(HBaseUtils.deserializeFunction(result.getRow(), - result.getValue(CATALOG_CF, CATALOG_COL))); - } - return functions; - } - - /** - * Store a function object - * @param function function object to store - * @throws IOException - */ - void putFunction(Function function) throws IOException { - byte[][] serialized = HBaseUtils.serializeFunction(function); - store(FUNC_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); - } - - /** - * Drop a function - * @param dbName name of database the function is in - * @param functionName name of function to drop - * @throws IOException - */ - void deleteFunction(String dbName, String functionName) throws IOException { - byte[] key = HBaseUtils.buildKey(dbName, functionName); - delete(FUNC_TABLE, key, null, null); - } - - /** - * Print out a function - * @param key key to get the function, must include dbname. 
- * @return string of the function - * @throws IOException - * @throws TException - */ - String printFunction(String key) throws IOException, TException { - byte[] k = HBaseUtils.buildKey(key); - byte[] serialized = read(FUNC_TABLE, k, CATALOG_CF, CATALOG_COL); - if (serialized == null) return noSuch(key, "function"); - Function func = HBaseUtils.deserializeFunction(k, serialized); - return dumpThriftObject(func); - } - - /** - * Print out functions - * @param regex regular expression to use in matching functions - * @return list of strings, one function each - * @throws IOException - * @throws TException - */ - List printFunctions(String regex) throws IOException, TException { - Filter filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex)); - Iterator iter = scan(FUNC_TABLE, null, null, CATALOG_CF, CATALOG_COL, filter); - List lines = new ArrayList<>(); - while (iter.hasNext()) { - Result result = iter.next(); - lines.add(dumpThriftObject(HBaseUtils.deserializeFunction(result.getRow(), - result.getValue(CATALOG_CF, CATALOG_COL)))); - } - if (lines.size() == 0) lines = noMatch(regex, "function"); - return lines; - } - - /********************************************************************************************** - * Global privilege related methods - *********************************************************************************************/ - - /** - * Fetch the global privileges object - * @return - * @throws IOException - */ - PrincipalPrivilegeSet getGlobalPrivs() throws IOException { - byte[] key = GLOBAL_PRIVS_KEY; - byte[] serialized = read(GLOBAL_PRIVS_TABLE, key, CATALOG_CF, CATALOG_COL); - if (serialized == null) return null; - return HBaseUtils.deserializePrincipalPrivilegeSet(serialized); - } - - /** - * Store the global privileges object - * @throws IOException - */ - void putGlobalPrivs(PrincipalPrivilegeSet privs) throws IOException { - byte[] key = GLOBAL_PRIVS_KEY; - byte[] serialized = HBaseUtils.serializePrincipalPrivilegeSet(privs); - store(GLOBAL_PRIVS_TABLE, key, CATALOG_CF, CATALOG_COL, serialized); - } - - /** - * Print out the global privileges. - * @return string containing the global privileges - * @throws IOException - * @throws TException - */ - String printGlobalPrivs() throws IOException, TException { - PrincipalPrivilegeSet pps = getGlobalPrivs(); - if (pps == null) return "No global privileges"; - else return dumpThriftObject(pps); - } - - /********************************************************************************************** - * Partition related methods - *********************************************************************************************/ - - /** - * Fetch one partition - * @param dbName database table is in - * @param tableName table partition is in - * @param partVals list of values that specify the partition, given in the same order as the - * columns they belong to - * @return The partition objec,t or null if there is no such partition - * @throws IOException - */ - Partition getPartition(String dbName, String tableName, List partVals) - throws IOException { - return getPartition(dbName, tableName, partVals, true); - } - - /** - * Get a set of specific partitions. This cannot be used to do a scan, each partition must be - * completely specified. This does not use the partition cache. 
- * @param dbName database table is in - * @param tableName table partitions are in - * @param partValLists list of list of values, each list should uniquely identify one partition - * @return a list of partition objects. - * @throws IOException - */ - List getPartitions(String dbName, String tableName, List partTypes, - List> partValLists) throws IOException { - List parts = new ArrayList<>(partValLists.size()); - List gets = new ArrayList<>(partValLists.size()); - for (List partVals : partValLists) { - byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals); - Get get = new Get(key); - get.addColumn(CATALOG_CF, CATALOG_COL); - gets.add(get); - } - HTableInterface htab = conn.getHBaseTable(PART_TABLE); - Result[] results = htab.get(gets); - for (int i = 0; i < results.length; i++) { - HBaseUtils.StorageDescriptorParts sdParts = - HBaseUtils.deserializePartition(dbName, tableName, partValLists.get(i), - results[i].getValue(CATALOG_CF, CATALOG_COL)); - StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); - HBaseUtils.assembleStorageDescriptor(sd, sdParts); - parts.add(sdParts.containingPartition); - } - - return parts; - } - - /** - * Add a partition. This should only be called for new partitions. For altering existing - * partitions this should not be called as it will blindly increment the ref counter for the - * storage descriptor. - * @param partition partition object to add - * @throws IOException - */ - void putPartition(Partition partition) throws IOException { - byte[] hash = putStorageDescriptor(partition.getSd()); - byte[][] serialized = HBaseUtils.serializePartition(partition, - HBaseUtils.getPartitionKeyTypes(getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys()), hash); - store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); - partCache.put(partition.getDbName(), partition.getTableName(), partition); - } - - /** - * Replace an existing partition. - * @param oldPart partition to be replaced - * @param newPart partitiion to replace it with - * @throws IOException - */ - void replacePartition(Partition oldPart, Partition newPart, List partTypes) throws IOException { - byte[] hash; - byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldPart.getSd(), md); - byte[] newHash = HBaseUtils.hashStorageDescriptor(newPart.getSd(), md); - if (Arrays.equals(oldHash, newHash)) { - hash = oldHash; - } else { - decrementStorageDescriptorRefCount(oldPart.getSd()); - hash = putStorageDescriptor(newPart.getSd()); - } - byte[][] serialized = HBaseUtils.serializePartition(newPart, - HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()), hash); - store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); - partCache.put(newPart.getDbName(), newPart.getTableName(), newPart); - if (!oldPart.getTableName().equals(newPart.getTableName())) { - deletePartition(oldPart.getDbName(), oldPart.getTableName(), partTypes, oldPart.getValues()); - } - } - - /** - * Add a group of partitions. This should only be used when all partitions are new. It - * blindly increments the ref count on the storage descriptor. 
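Editor's note: getPartitions above avoids one round trip per partition by building one Get per fully specified value list and handing the whole list to the table in a single batched call. A minimal sketch of that batching pattern follows; the key construction is left to the caller and the family/qualifier names are assumed placeholders.

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchedGetSketch {
  private static final byte[] CF = Bytes.toBytes("c");   // placeholder family
  private static final byte[] COL = Bytes.toBytes("c");  // placeholder qualifier

  /**
   * Fetch many rows in one batched call. The caller supplies the pre-built row keys
   * (one per partition); a null entry in the result means that row did not exist.
   */
  static List<byte[]> multiGet(Connection conn, String tableName, List<byte[]> rowKeys)
      throws IOException {
    List<Get> gets = new ArrayList<>(rowKeys.size());
    for (byte[] key : rowKeys) {
      Get g = new Get(key);
      g.addColumn(CF, COL);
      gets.add(g);
    }
    List<byte[]> values = new ArrayList<>(rowKeys.size());
    try (Table htab = conn.getTable(TableName.valueOf(tableName))) {
      Result[] results = htab.get(gets);   // one batched call instead of N point gets
      for (Result r : results) {
        values.add(r.isEmpty() ? null : r.getValue(CF, COL));
      }
    }
    return values;
  }
}
```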
- * @param partitions list of partitions to add - * @throws IOException - */ - void putPartitions(List partitions) throws IOException { - List puts = new ArrayList<>(partitions.size()); - for (Partition partition : partitions) { - byte[] hash = putStorageDescriptor(partition.getSd()); - List partTypes = HBaseUtils.getPartitionKeyTypes( - getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys()); - byte[][] serialized = HBaseUtils.serializePartition(partition, partTypes, hash); - Put p = new Put(serialized[0]); - p.add(CATALOG_CF, CATALOG_COL, serialized[1]); - puts.add(p); - partCache.put(partition.getDbName(), partition.getTableName(), partition); - } - HTableInterface htab = conn.getHBaseTable(PART_TABLE); - htab.put(puts); - conn.flush(htab); - } - - void replacePartitions(List oldParts, List newParts, List oldPartTypes) throws IOException { - if (oldParts.size() != newParts.size()) { - throw new RuntimeException("Number of old and new partitions must match."); - } - List puts = new ArrayList<>(newParts.size()); - for (int i = 0; i < newParts.size(); i++) { - byte[] hash; - byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldParts.get(i).getSd(), md); - byte[] newHash = HBaseUtils.hashStorageDescriptor(newParts.get(i).getSd(), md); - if (Arrays.equals(oldHash, newHash)) { - hash = oldHash; - } else { - decrementStorageDescriptorRefCount(oldParts.get(i).getSd()); - hash = putStorageDescriptor(newParts.get(i).getSd()); - } - Partition newPart = newParts.get(i); - byte[][] serialized = HBaseUtils.serializePartition(newPart, - HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()), hash); - Put p = new Put(serialized[0]); - p.add(CATALOG_CF, CATALOG_COL, serialized[1]); - puts.add(p); - partCache.put(newParts.get(i).getDbName(), newParts.get(i).getTableName(), newParts.get(i)); - if (!newParts.get(i).getTableName().equals(oldParts.get(i).getTableName())) { - // We need to remove the old record as well. - deletePartition(oldParts.get(i).getDbName(), oldParts.get(i).getTableName(), oldPartTypes, - oldParts.get(i).getValues(), false); - } - } - HTableInterface htab = conn.getHBaseTable(PART_TABLE); - htab.put(puts); - conn.flush(htab); - } - - /** - * Find all the partitions in a table. - * @param dbName name of the database the table is in - * @param tableName table name - * @param maxPartitions max partitions to fetch. If negative all partitions will be returned. - * @return List of partitions that match the criteria. - * @throws IOException - */ - List scanPartitionsInTable(String dbName, String tableName, int maxPartitions) - throws IOException { - if (maxPartitions < 0) maxPartitions = Integer.MAX_VALUE; - Collection cached = partCache.getAllForTable(dbName, tableName); - if (cached != null) { - return maxPartitions < cached.size() - ? new ArrayList<>(cached).subList(0, maxPartitions) - : new ArrayList<>(cached); - } - byte[] keyPrefix = HBaseUtils.buildPartitionKey(dbName, tableName, new ArrayList(), - new ArrayList(), false); - List parts = scanPartitionsWithFilter(dbName, tableName, keyPrefix, - HBaseUtils.getEndPrefix(keyPrefix), -1, null); - partCache.put(dbName, tableName, parts, true); - return maxPartitions < parts.size() ? parts.subList(0, maxPartitions) : parts; - } - - /** - * Scan partitions based on partial key information. - * @param dbName name of database, required - * @param tableName name of table, required - * @param partVals partial specification of values. 
Any values that are unknown can instead be - * a '*'. For example, if a table had two partition columns date - * and region (in that order), and partitions ('today', 'na'), ('today', 'eu'), - * ('tomorrow', 'na'), ('tomorrow', 'eu') then passing ['today', '*'] would return - * ('today', 'na') and ('today', 'eu') while passing ['*', 'eu'] would return - * ('today', 'eu') and ('tomorrow', 'eu'). Also the list can terminate early, - * which will be the equivalent of adding '*' for all non-included values. - * I.e. ['today'] is the same as ['today', '*']. - * @param maxPartitions Maximum number of entries to return. - * @return list of partitions that match the specified information - * @throws IOException - * @throws org.apache.hadoop.hive.metastore.api.NoSuchObjectException if the table containing - * the partitions can't be found. - */ - List scanPartitions(String dbName, String tableName, List partVals, - int maxPartitions) throws IOException, NoSuchObjectException { - - PartitionScanInfo psi = scanPartitionsInternal(dbName, tableName, partVals, maxPartitions); - List parts = scanPartitionsWithFilter(dbName, tableName, psi.keyPrefix, - psi.endKeyPrefix, maxPartitions, psi.filter); - partCache.put(dbName, tableName, parts, false); - return parts; - } - - List scanPartitions(String dbName, String tableName, byte[] keyStart, byte[] keyEnd, - Filter filter, int maxPartitions) - throws IOException, NoSuchObjectException { - byte[] startRow = keyStart; - byte[] endRow; - if (keyEnd == null || keyEnd.length == 0) { - // stop when current db+table entries are over - endRow = HBaseUtils.getEndPrefix(startRow); - } else { - endRow = keyEnd; - } - - if (LOG.isDebugEnabled()) { - LOG.debug("Scanning partitions with start row <" + new String(startRow) + "> and end row <" - + new String(endRow) + ">"); - } - return scanPartitionsWithFilter(dbName, tableName, startRow, endRow, maxPartitions, filter); - } - - /** - * Delete a partition - * @param dbName database name that table is in - * @param tableName table partition is in - * @param partVals partition values that define this partition, in the same order as the - * partition columns they are values for - * @throws IOException - */ - void deletePartition(String dbName, String tableName, List partTypes, - List partVals) throws IOException { - deletePartition(dbName, tableName, partTypes, partVals, true); - } - - /** - * Print out a partition. - * @param partKey The key for the partition. This must include dbname.tablename._partkeys_ - * where _partkeys_ is a dot separated list of partition values in the proper - * order. 
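Editor's note: to make the '*' convention of scanPartitions concrete without touching HBase, the small self-contained sketch below models the matching rule described above: values before the first '*' (or before the end of a short list) must match exactly, and everything after is a wildcard. The partition data is invented purely for illustration.

```java
import java.util.Arrays;
import java.util.List;

public class PartialPartitionMatchSketch {
  /** True when partVals (possibly short, possibly containing "*") matches the candidate. */
  static boolean matches(List<String> partVals, List<String> candidate) {
    for (int i = 0; i < candidate.size(); i++) {
      if (i >= partVals.size()) return true;            // short spec: rest is implicitly '*'
      String want = partVals.get(i);
      if ("*".equals(want)) continue;                    // explicit wildcard
      if (!want.equals(candidate.get(i))) return false;  // literal mismatch
    }
    return true;
  }

  public static void main(String[] args) {
    List<List<String>> parts = Arrays.asList(
        Arrays.asList("today", "na"), Arrays.asList("today", "eu"),
        Arrays.asList("tomorrow", "na"), Arrays.asList("tomorrow", "eu"));
    // ["today", "*"] and ["today"] both select ('today','na') and ('today','eu');
    // ["*", "eu"] selects ('today','eu') and ('tomorrow','eu').
    for (List<String> p : parts) {
      System.out.println(p + " matches [today,*]? " + matches(Arrays.asList("today", "*"), p));
      System.out.println(p + " matches [*,eu]?    " + matches(Arrays.asList("*", "eu"), p));
    }
  }
}
```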
- * @return string containing the partition - * @throws IOException - * @throws TException - */ - String printPartition(String partKey) throws IOException, TException { - // First figure out the table and fetch it - String[] partKeyParts = partKey.split(HBaseUtils.KEY_SEPARATOR_STR); - if (partKeyParts.length < 3) return noSuch(partKey, "partition"); - Table table = getTable(partKeyParts[0], partKeyParts[1]); - if (table == null) return noSuch(partKey, "partition"); - - byte[] key = HBaseUtils.buildPartitionKey(partKeyParts[0], partKeyParts[1], - HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys()), - Arrays.asList(Arrays.copyOfRange(partKeyParts, 2, partKeyParts.length))); - @SuppressWarnings("deprecation") - HTableInterface htab = conn.getHBaseTable(PART_TABLE); - Get g = new Get(key); - g.addColumn(CATALOG_CF, CATALOG_COL); - g.addFamily(STATS_CF); - Result result = htab.get(g); - if (result.isEmpty()) return noSuch(partKey, "partition"); - return printOnePartition(result); - } - - /** - * Print partitions - * @param partKey a partial partition key. This must match the beginnings of the partition key. - * It can be just dbname.tablename, or dbname.table.pval... where pval are the - * partition values in order. They must be in the correct order and they must - * be literal values (no regular expressions) - * @return partitions as strings - * @throws IOException - * @throws TException - */ - List printPartitions(String partKey) throws IOException, TException { - // First figure out the table and fetch it - // Split on dot here rather than the standard separator because this will be passed in as a - // regex, even though we aren't fully supporting regexes. - String[] partKeyParts = partKey.split("\\."); - if (partKeyParts.length < 2) return noMatch(partKey, "partition"); - List partVals = partKeyParts.length == 2 ?
Arrays.asList("*") : - Arrays.asList(Arrays.copyOfRange(partKeyParts, 2, partKeyParts.length)); - PartitionScanInfo psi; - try { - psi = - scanPartitionsInternal(partKeyParts[0], partKeyParts[1], partVals, -1); - } catch (NoSuchObjectException e) { - return noMatch(partKey, "partition"); - } - - @SuppressWarnings("deprecation") - HTableInterface htab = conn.getHBaseTable(PART_TABLE); - Scan scan = new Scan(); - scan.addColumn(CATALOG_CF, CATALOG_COL); - scan.addFamily(STATS_CF); - scan.setStartRow(psi.keyPrefix); - scan.setStopRow(psi.endKeyPrefix); - scan.setFilter(psi.filter); - Iterator iter = htab.getScanner(scan).iterator(); - if (!iter.hasNext()) return noMatch(partKey, "partition"); - List lines = new ArrayList<>(); - while (iter.hasNext()) { - lines.add(printOnePartition(iter.next())); - } - return lines; - } - - int getPartitionCount() throws IOException { - Filter fil = new FirstKeyOnlyFilter(); - Iterator iter = scan(PART_TABLE, fil); - return Iterators.size(iter); - } - - private String printOnePartition(Result result) throws IOException, TException { - byte[] key = result.getRow(); - HBaseUtils.StorageDescriptorParts sdParts = - HBaseUtils.deserializePartition(key, result.getValue(CATALOG_CF, CATALOG_COL), this); - StringBuilder builder = new StringBuilder(); - builder.append(dumpThriftObject(sdParts.containingPartition)) - .append(" sdHash: ") - .append(Base64.encodeBase64URLSafeString(sdParts.sdHash)) - .append(" stats:"); - NavigableMap statsCols = result.getFamilyMap(STATS_CF); - for (Map.Entry statsCol : statsCols.entrySet()) { - builder.append(" column ") - .append(new String(statsCol.getKey(), HBaseUtils.ENCODING)) - .append(": "); - ColumnStatistics pcs = buildColStats(key, false); - ColumnStatisticsObj cso = HBaseUtils.deserializeStatsForOneColumn(pcs, statsCol.getValue()); - builder.append(dumpThriftObject(cso)); - } - return builder.toString(); - } - - private void deletePartition(String dbName, String tableName, List partTypes, - List partVals, boolean decrementRefCnt) throws IOException { - // Find the partition so I can get the storage descriptor and drop it - partCache.remove(dbName, tableName, partVals); - if (decrementRefCnt) { - Partition p = getPartition(dbName, tableName, partVals, false); - decrementStorageDescriptorRefCount(p.getSd()); - } - byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals); - delete(PART_TABLE, key, null, null); - } - - private Partition getPartition(String dbName, String tableName, List partVals, - boolean populateCache) throws IOException { - Partition cached = partCache.get(dbName, tableName, partVals); - if (cached != null) return cached; - byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, - HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), partVals); - byte[] serialized = read(PART_TABLE, key, CATALOG_CF, CATALOG_COL); - if (serialized == null) return null; - HBaseUtils.StorageDescriptorParts sdParts = - HBaseUtils.deserializePartition(dbName, tableName, partVals, serialized); - StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); - HBaseUtils.assembleStorageDescriptor(sd, sdParts); - if (populateCache) partCache.put(dbName, tableName, sdParts.containingPartition); - return sdParts.containingPartition; - } - - - private static class PartitionScanInfo { - final String dbName; - final String tableName; - final byte[] keyPrefix; - final byte[] endKeyPrefix; - final int maxPartitions; - final Filter filter; - - PartitionScanInfo(String d, String t, byte[] 
k, byte[] e, int m, Filter f) { - dbName = d; - tableName = t; - keyPrefix = k; - endKeyPrefix = e; - maxPartitions = m; - filter = f; - } - - @Override - public String toString() { - return new StringBuilder("dbName:") - .append(dbName) - .append(" tableName:") - .append(tableName) - .append(" keyPrefix:") - .append(Base64.encodeBase64URLSafeString(keyPrefix)) - .append(" endKeyPrefix:") - .append(Base64.encodeBase64URLSafeString(endKeyPrefix)) - .append(" maxPartitions:") - .append(maxPartitions) - .append(" filter:") - .append(filter.toString()) - .toString(); - } - } - - private PartitionScanInfo scanPartitionsInternal(String dbName, String tableName, - List partVals, int maxPartitions) - throws IOException, NoSuchObjectException { - // First, build as much of the key as we can so that we make the scan as tight as possible. - List keyElements = new ArrayList<>(); - keyElements.add(dbName); - keyElements.add(tableName); - - int firstStar = -1; - for (int i = 0; i < partVals.size(); i++) { - if ("*".equals(partVals.get(i))) { - firstStar = i; - break; - } else { - // empty string equals to null partition, - // means star - if (partVals.get(i).equals("")) { - break; - } else { - keyElements.add(partVals.get(i)); - } - } - } - - byte[] keyPrefix; - // We need to fetch the table to determine if the user fully specified the partitions or - // not, as it affects how we build the key. - Table table = getTable(dbName, tableName); - if (table == null) { - throw new NoSuchObjectException("Unable to find table " + dbName + "." + tableName); - } - keyPrefix = HBaseUtils.buildPartitionKey(dbName, tableName, - HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys().subList(0, keyElements.size()-2)), - keyElements.subList(2, keyElements.size())); - - // Now, build a filter out of the remaining keys - List ranges = new ArrayList(); - List ops = new ArrayList(); - if (!(partVals.size() == table.getPartitionKeys().size() && firstStar == -1)) { - - for (int i = Math.max(0, firstStar); - i < table.getPartitionKeys().size() && i < partVals.size(); i++) { - - if ("*".equals(partVals.get(i))) { - PartitionKeyComparator.Operator op = new PartitionKeyComparator.Operator( - PartitionKeyComparator.Operator.Type.LIKE, - table.getPartitionKeys().get(i).getName(), - ".*"); - ops.add(op); - } else { - PartitionKeyComparator.Range range = new PartitionKeyComparator.Range( - table.getPartitionKeys().get(i).getName(), - new PartitionKeyComparator.Mark(partVals.get(i), true), - new PartitionKeyComparator.Mark(partVals.get(i), true)); - ranges.add(range); - } - } - } - - Filter filter = null; - if (!ranges.isEmpty() || !ops.isEmpty()) { - filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new PartitionKeyComparator( - StringUtils.join(HBaseUtils.getPartitionNames(table.getPartitionKeys()), ","), - StringUtils.join(HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys()), ","), - ranges, ops)); - } - - if (LOG.isDebugEnabled()) { - LOG.debug("Scanning partitions with prefix <" + new String(keyPrefix) + "> and filter <" + - filter + ">"); - } - - return new PartitionScanInfo(dbName, tableName, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), - maxPartitions, filter); - } - - private List scanPartitionsWithFilter(String dbName, String tableName, - byte[] startRow, byte [] endRow, int maxResults, - Filter filter) throws IOException { - Iterator iter = - scan(PART_TABLE, startRow, endRow, CATALOG_CF, CATALOG_COL, filter); - List tablePartitions = getTable(dbName, tableName).getPartitionKeys(); - List parts = new 
ArrayList<>(); - int numToFetch = maxResults < 0 ? Integer.MAX_VALUE : maxResults; - for (int i = 0; i < numToFetch && iter.hasNext(); i++) { - Result result = iter.next(); - HBaseUtils.StorageDescriptorParts sdParts = HBaseUtils.deserializePartition(dbName, tableName, - tablePartitions, result.getRow(), result.getValue(CATALOG_CF, CATALOG_COL), conf); - StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); - HBaseUtils.assembleStorageDescriptor(sd, sdParts); - parts.add(sdParts.containingPartition); - } - return parts; - } - - /********************************************************************************************** - * Role related methods - *********************************************************************************************/ - - /** - * Fetch the list of all roles for a user - * @param userName name of the user - * @return the list of all roles this user participates in - * @throws IOException - */ - List getUserRoles(String userName) throws IOException { - byte[] key = HBaseUtils.buildKey(userName); - byte[] serialized = read(USER_TO_ROLE_TABLE, key, CATALOG_CF, CATALOG_COL); - if (serialized == null) return null; - return HBaseUtils.deserializeRoleList(serialized); - } - - /** - * Find all roles directly participated in by a given principal. This builds the role cache - * because it assumes that subsequent calls may be made to find roles participated in indirectly. - * @param name username or role name - * @param type user or role - * @return map of role name to grant info for all roles directly participated in. - */ - List getPrincipalDirectRoles(String name, PrincipalType type) - throws IOException { - buildRoleCache(); - - Set rolesFound = new HashSet<>(); - for (Map.Entry e : roleCache.entrySet()) { - for (HbaseMetastoreProto.RoleGrantInfo giw : e.getValue().getGrantInfoList()) { - if (HBaseUtils.convertPrincipalTypes(giw.getPrincipalType()) == type && - giw.getPrincipalName().equals(name)) { - rolesFound.add(e.getKey()); - break; - } - } - } - List directRoles = new ArrayList<>(rolesFound.size()); - List gets = new ArrayList<>(); - HTableInterface htab = conn.getHBaseTable(ROLE_TABLE); - for (String roleFound : rolesFound) { - byte[] key = HBaseUtils.buildKey(roleFound); - Get g = new Get(key); - g.addColumn(CATALOG_CF, CATALOG_COL); - gets.add(g); - } - - Result[] results = htab.get(gets); - for (int i = 0; i < results.length; i++) { - byte[] serialized = results[i].getValue(CATALOG_CF, CATALOG_COL); - if (serialized != null) { - directRoles.add(HBaseUtils.deserializeRole(results[i].getRow(), serialized)); - } - } - - return directRoles; - } - - /** - * Fetch all roles and users included directly in a given role. - * @param roleName name of the principal - * @return a list of all roles included in this role - * @throws IOException - */ - HbaseMetastoreProto.RoleGrantInfoList getRolePrincipals(String roleName) - throws IOException, NoSuchObjectException { - HbaseMetastoreProto.RoleGrantInfoList rolePrincipals = roleCache.get(roleName); - if (rolePrincipals != null) return rolePrincipals; - byte[] key = HBaseUtils.buildKey(roleName); - byte[] serialized = read(ROLE_TABLE, key, CATALOG_CF, ROLES_COL); - if (serialized == null) return null; - rolePrincipals = HbaseMetastoreProto.RoleGrantInfoList.parseFrom(serialized); - roleCache.put(roleName, rolePrincipals); - return rolePrincipals; - } - - /** - * Given a role, find all users who are either directly or indirectly participate in this role. - * This is expensive, it should be used sparingly. 
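Editor's note: the role lookups above rely on a deliberately denormalized layout: the role table keeps one row per role holding its grant list, while the user-to-role table keeps one row per user holding the already flattened list of every role that user is in, so the common authorization check is a single point get. A hedged sketch of that read path follows; the table and column names and the comma-joined decoding are illustrative stand-ins for the real serialized form.

```java
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class UserRoleLookupSketch {
  private static final TableName USER_TO_ROLE = TableName.valueOf("HBMS_USER_TO_ROLE"); // assumed
  private static final byte[] CF = Bytes.toBytes("c");   // placeholder family
  private static final byte[] COL = Bytes.toBytes("c");  // placeholder qualifier

  /** One point get answers "which roles is this user in?", including indirect roles. */
  static List<String> rolesForUser(Connection conn, String userName) throws IOException {
    try (Table htab = conn.getTable(USER_TO_ROLE)) {
      Get g = new Get(Bytes.toBytes(userName));
      g.addColumn(CF, COL);
      Result r = htab.get(g);
      if (r.isEmpty()) return Collections.emptyList();
      // Stand-in decoding: the real store used a compact serialized list, not CSV.
      return Arrays.asList(Bytes.toString(r.getValue(CF, COL)).split(","));
    }
  }
}
```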
It scan the entire userToRole table and - * does a linear search on each entry. - * @param roleName name of the role - * @return set of all users in the role - * @throws IOException - */ - Set findAllUsersInRole(String roleName) throws IOException { - // Walk the userToRole table and collect every user that matches this role. - Set users = new HashSet<>(); - Iterator iter = scan(USER_TO_ROLE_TABLE, CATALOG_CF, CATALOG_COL); - while (iter.hasNext()) { - Result result = iter.next(); - List roleList = - HBaseUtils.deserializeRoleList(result.getValue(CATALOG_CF, CATALOG_COL)); - for (String rn : roleList) { - if (rn.equals(roleName)) { - users.add(new String(result.getRow(), HBaseUtils.ENCODING)); - break; - } - } - } - return users; - } - - /** - * Add a principal to a role. - * @param roleName name of the role to add principal to - * @param grantInfo grant information for this principal. - * @throws java.io.IOException - * @throws NoSuchObjectException - * - */ - void addPrincipalToRole(String roleName, HbaseMetastoreProto.RoleGrantInfo grantInfo) - throws IOException, NoSuchObjectException { - HbaseMetastoreProto.RoleGrantInfoList proto = getRolePrincipals(roleName); - List rolePrincipals = new ArrayList<>(); - if (proto != null) { - rolePrincipals.addAll(proto.getGrantInfoList()); - } - - rolePrincipals.add(grantInfo); - proto = HbaseMetastoreProto.RoleGrantInfoList.newBuilder() - .addAllGrantInfo(rolePrincipals) - .build(); - byte[] key = HBaseUtils.buildKey(roleName); - store(ROLE_TABLE, key, CATALOG_CF, ROLES_COL, proto.toByteArray()); - roleCache.put(roleName, proto); - } - - /** - * Drop a principal from a role. - * @param roleName Name of the role to drop the principal from - * @param principalName name of the principal to drop from the role - * @param type user or role - * @param grantOnly if this is true, just remove the grant option, don't actually remove the - * user from the role. - * @throws NoSuchObjectException - * @throws IOException - */ - void dropPrincipalFromRole(String roleName, String principalName, PrincipalType type, - boolean grantOnly) - throws NoSuchObjectException, IOException { - HbaseMetastoreProto.RoleGrantInfoList proto = getRolePrincipals(roleName); - if (proto == null) return; - List rolePrincipals = new ArrayList<>(); - rolePrincipals.addAll(proto.getGrantInfoList()); - - for (int i = 0; i < rolePrincipals.size(); i++) { - if (HBaseUtils.convertPrincipalTypes(rolePrincipals.get(i).getPrincipalType()) == type && - rolePrincipals.get(i).getPrincipalName().equals(principalName)) { - if (grantOnly) { - rolePrincipals.set(i, - HbaseMetastoreProto.RoleGrantInfo.newBuilder(rolePrincipals.get(i)) - .setGrantOption(false) - .build()); - } else { - rolePrincipals.remove(i); - } - break; - } - } - byte[] key = HBaseUtils.buildKey(roleName); - proto = HbaseMetastoreProto.RoleGrantInfoList.newBuilder() - .addAllGrantInfo(rolePrincipals) - .build(); - store(ROLE_TABLE, key, CATALOG_CF, ROLES_COL, proto.toByteArray()); - roleCache.put(roleName, proto); - } - - /** - * Rebuild the row for a given user in the USER_TO_ROLE table. This is expensive. It - * should be called as infrequently as possible. - * @param userName name of the user - * @throws IOException - */ - void buildRoleMapForUser(String userName) throws IOException, NoSuchObjectException { - // This is mega ugly. Hopefully we don't have to do this too often. 
- // First, scan the role table and put it all in memory - buildRoleCache(); - LOG.debug("Building role map for " + userName); - - // Second, find every role the user participates in directly. - Set rolesToAdd = new HashSet<>(); - Set rolesToCheckNext = new HashSet<>(); - for (Map.Entry e : roleCache.entrySet()) { - for (HbaseMetastoreProto.RoleGrantInfo grantInfo : e.getValue().getGrantInfoList()) { - if (HBaseUtils.convertPrincipalTypes(grantInfo.getPrincipalType()) == PrincipalType.USER && - userName .equals(grantInfo.getPrincipalName())) { - rolesToAdd.add(e.getKey()); - rolesToCheckNext.add(e.getKey()); - LOG.debug("Adding " + e.getKey() + " to list of roles user is in directly"); - break; - } - } - } - - // Third, find every role the user participates in indirectly (that is, they have been - // granted into role X and role Y has been granted into role X). - while (rolesToCheckNext.size() > 0) { - Set tmpRolesToCheckNext = new HashSet<>(); - for (String roleName : rolesToCheckNext) { - HbaseMetastoreProto.RoleGrantInfoList grantInfos = roleCache.get(roleName); - if (grantInfos == null) continue; // happens when a role contains no grants - for (HbaseMetastoreProto.RoleGrantInfo grantInfo : grantInfos.getGrantInfoList()) { - if (HBaseUtils.convertPrincipalTypes(grantInfo.getPrincipalType()) == PrincipalType.ROLE && - rolesToAdd.add(grantInfo.getPrincipalName())) { - tmpRolesToCheckNext.add(grantInfo.getPrincipalName()); - LOG.debug("Adding " + grantInfo.getPrincipalName() + - " to list of roles user is in indirectly"); - } - } - } - rolesToCheckNext = tmpRolesToCheckNext; - } - - byte[] key = HBaseUtils.buildKey(userName); - byte[] serialized = HBaseUtils.serializeRoleList(new ArrayList<>(rolesToAdd)); - store(USER_TO_ROLE_TABLE, key, CATALOG_CF, CATALOG_COL, serialized); - } - - /** - * Remove all of the grants for a role. This is not cheap. 
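Editor's note: the second and third steps of buildRoleMapForUser above amount to a breadth-first closure over the role graph: seed with the roles granted directly to the user, then repeatedly add any role that has one of the already found roles as a member. The standalone sketch below shows that closure over an in-memory grant map; it assumes a simplified Map of role name to member names rather than the protobuf grant lists the real code iterates.

```java
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class RoleClosureSketch {
  /**
   * roleMembers maps each role name to the principals granted into it
   * (both users and roles, kept as plain strings here for brevity).
   */
  static Set<String> rolesForUser(String user, Map<String, Set<String>> roleMembers) {
    Set<String> found = new HashSet<>();
    Deque<String> toCheck = new ArrayDeque<>();
    // Step 1: roles the user is in directly.
    for (Map.Entry<String, Set<String>> e : roleMembers.entrySet()) {
      if (e.getValue().contains(user)) {
        found.add(e.getKey());
        toCheck.add(e.getKey());
      }
    }
    // Step 2: roles the user is in indirectly (a found role granted into another role).
    while (!toCheck.isEmpty()) {
      String inner = toCheck.poll();
      for (Map.Entry<String, Set<String>> e : roleMembers.entrySet()) {
        if (e.getValue().contains(inner) && found.add(e.getKey())) {
          toCheck.add(e.getKey());
        }
      }
    }
    return found;
  }

  public static void main(String[] args) {
    Map<String, Set<String>> grants = new HashMap<>();
    grants.put("analysts", new HashSet<>(Arrays.asList("alice")));
    grants.put("readers", new HashSet<>(Arrays.asList("analysts"))); // analysts granted into readers
    System.out.println(rolesForUser("alice", grants)); // prints analysts and readers (order unspecified)
  }
}
```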
- * @param roleName Role to remove from all other roles and grants - * @throws IOException - */ - void removeRoleGrants(String roleName) throws IOException { - buildRoleCache(); - - List puts = new ArrayList<>(); - // First, walk the role table and remove any references to this role - for (Map.Entry e : roleCache.entrySet()) { - boolean madeAChange = false; - List rgil = new ArrayList<>(); - rgil.addAll(e.getValue().getGrantInfoList()); - for (int i = 0; i < rgil.size(); i++) { - if (HBaseUtils.convertPrincipalTypes(rgil.get(i).getPrincipalType()) == PrincipalType.ROLE && - rgil.get(i).getPrincipalName().equals(roleName)) { - rgil.remove(i); - madeAChange = true; - break; - } - } - if (madeAChange) { - Put put = new Put(HBaseUtils.buildKey(e.getKey())); - HbaseMetastoreProto.RoleGrantInfoList proto = - HbaseMetastoreProto.RoleGrantInfoList.newBuilder() - .addAllGrantInfo(rgil) - .build(); - put.add(CATALOG_CF, ROLES_COL, proto.toByteArray()); - puts.add(put); - roleCache.put(e.getKey(), proto); - } - } - - if (puts.size() > 0) { - HTableInterface htab = conn.getHBaseTable(ROLE_TABLE); - htab.put(puts); - conn.flush(htab); - } - - // Remove any global privileges held by this role - PrincipalPrivilegeSet global = getGlobalPrivs(); - if (global != null && - global.getRolePrivileges() != null && - global.getRolePrivileges().remove(roleName) != null) { - putGlobalPrivs(global); - } - - // Now, walk the db table - puts.clear(); - List dbs = scanDatabases(null); - if (dbs == null) dbs = new ArrayList<>(); // rare, but can happen - for (Database db : dbs) { - if (db.getPrivileges() != null && - db.getPrivileges().getRolePrivileges() != null && - db.getPrivileges().getRolePrivileges().remove(roleName) != null) { - byte[][] serialized = HBaseUtils.serializeDatabase(db); - Put put = new Put(serialized[0]); - put.add(CATALOG_CF, CATALOG_COL, serialized[1]); - puts.add(put); - } - } - - if (puts.size() > 0) { - HTableInterface htab = conn.getHBaseTable(DB_TABLE); - htab.put(puts); - conn.flush(htab); - } - - // Finally, walk the table table - puts.clear(); - for (Database db : dbs) { - List
tables = scanTables(db.getName(), null); - if (tables != null) { - for (Table table : tables) { - if (table.getPrivileges() != null && - table.getPrivileges().getRolePrivileges() != null && - table.getPrivileges().getRolePrivileges().remove(roleName) != null) { - byte[][] serialized = HBaseUtils.serializeTable(table, - HBaseUtils.hashStorageDescriptor(table.getSd(), md)); - Put put = new Put(serialized[0]); - put.add(CATALOG_CF, CATALOG_COL, serialized[1]); - puts.add(put); - } - } - } - } - - if (puts.size() > 0) { - HTableInterface htab = conn.getHBaseTable(TABLE_TABLE); - htab.put(puts); - conn.flush(htab); - } - } - - /** - * Fetch a role - * @param roleName name of the role - * @return role object, or null if no such role - * @throws IOException - */ - Role getRole(String roleName) throws IOException { - byte[] key = HBaseUtils.buildKey(roleName); - byte[] serialized = read(ROLE_TABLE, key, CATALOG_CF, CATALOG_COL); - if (serialized == null) return null; - return HBaseUtils.deserializeRole(roleName, serialized); - } - - /** - * Get a list of roles. - * @return list of all known roles. - * @throws IOException - */ - List scanRoles() throws IOException { - return scanRoles(null); - } - - /** - * Add a new role - * @param role role object - * @throws IOException - */ - void putRole(Role role) throws IOException { - byte[][] serialized = HBaseUtils.serializeRole(role); - store(ROLE_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); - } - - /** - * Drop a role - * @param roleName name of role to drop - * @throws IOException - */ - void deleteRole(String roleName) throws IOException { - byte[] key = HBaseUtils.buildKey(roleName); - delete(ROLE_TABLE, key, null, null); - roleCache.remove(roleName); - } - - String printRolesForUser(String userName) throws IOException { - List roles = getUserRoles(userName); - if (roles == null || roles.size() == 0) return noSuch(userName, "user"); - return org.apache.commons.lang.StringUtils.join(roles, ','); - } - - List printRolesForUsers(String regex) throws IOException { - Filter filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex)); - Iterator iter = scan(USER_TO_ROLE_TABLE, null, null, CATALOG_CF, CATALOG_COL, filter); - List lines = new ArrayList<>(); - while (iter.hasNext()) { - Result result = iter.next(); - lines.add(new String(result.getRow(), HBaseUtils.ENCODING) + ": " + - org.apache.commons.lang.StringUtils.join( - HBaseUtils.deserializeRoleList(result.getValue(CATALOG_CF, CATALOG_COL)), ',')); - } - if (lines.size() == 0) lines = noMatch(regex, "user"); - return lines; - } - - /** - * Print out a role - * @param name name of role to print - * @return string printout of role - */ - String printRole(String name) throws IOException, TException { - Role role = getRole(name); - if (role == null) return noSuch(name, "role"); - else return dumpThriftObject(role); - } - - /** - * Print out roles. 
- * @param regex regular to use to search for roles - * @return string printout of roles - * @throws IOException - * @throws TException - */ - List printRoles(String regex) throws IOException, TException { - List roles = scanRoles(regex); - if (roles.size() == 0) { - return noMatch(regex, "role"); - } else { - List lines = new ArrayList<>(); - for (Role role : roles) lines.add(dumpThriftObject(role)); - return lines; - } - } - - private List scanRoles(String regex) throws IOException { - Filter filter = null; - if (regex != null) { - filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex)); - } - Iterator iter = scan(ROLE_TABLE, null, null, CATALOG_CF, CATALOG_COL, filter); - List roles = new ArrayList<>(); - while (iter.hasNext()) { - Result result = iter.next(); - roles.add(HBaseUtils.deserializeRole(result.getRow(), - result.getValue(CATALOG_CF, CATALOG_COL))); - } - return roles; - } - - private void buildRoleCache() throws IOException { - if (!entireRoleTableInCache) { - Iterator roles = scan(ROLE_TABLE, CATALOG_CF, ROLES_COL); - while (roles.hasNext()) { - Result res = roles.next(); - String roleName = new String(res.getRow(), HBaseUtils.ENCODING); - HbaseMetastoreProto.RoleGrantInfoList grantInfos = - HbaseMetastoreProto.RoleGrantInfoList.parseFrom(res.getValue(CATALOG_CF, ROLES_COL)); - roleCache.put(roleName, grantInfos); - } - entireRoleTableInCache = true; - } - } - - /********************************************************************************************** - * Table related methods - *********************************************************************************************/ - - /** - * Fetch a table object - * @param dbName database the table is in - * @param tableName table name - * @return Table object, or null if no such table - * @throws IOException - */ - Table getTable(String dbName, String tableName) throws IOException { - return getTable(dbName, tableName, true); - } - - /** - * Fetch a list of table objects. - * @param dbName Database that all fetched tables are in - * @param tableNames list of table names - * @return list of tables, in the same order as the provided names. - * @throws IOException - */ - List
getTables(String dbName, List tableNames) throws IOException { - // I could implement getTable in terms of this method. But it is such a core function - // that I don't want to slow it down for the much less common fetching of multiple tables. - List
results = new ArrayList<>(tableNames.size()); - ObjectPair[] hashKeys = new ObjectPair[tableNames.size()]; - boolean atLeastOneMissing = false; - for (int i = 0; i < tableNames.size(); i++) { - hashKeys[i] = new ObjectPair<>(dbName, tableNames.get(i)); - // The result may be null, but we still want to add it so that we have a slot in the list - // for it. - results.add(tableCache.get(hashKeys[i])); - if (results.get(i) == null) atLeastOneMissing = true; - } - if (!atLeastOneMissing) return results; - - // Now build a single get that will fetch the remaining tables - List gets = new ArrayList<>(); - HTableInterface htab = conn.getHBaseTable(TABLE_TABLE); - for (int i = 0; i < tableNames.size(); i++) { - if (results.get(i) != null) continue; - byte[] key = HBaseUtils.buildKey(dbName, tableNames.get(i)); - Get g = new Get(key); - g.addColumn(CATALOG_CF, CATALOG_COL); - gets.add(g); - } - Result[] res = htab.get(gets); - for (int i = 0, nextGet = 0; i < tableNames.size(); i++) { - if (results.get(i) != null) continue; - byte[] serialized = res[nextGet++].getValue(CATALOG_CF, CATALOG_COL); - if (serialized != null) { - HBaseUtils.StorageDescriptorParts sdParts = - HBaseUtils.deserializeTable(dbName, tableNames.get(i), serialized); - StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); - HBaseUtils.assembleStorageDescriptor(sd, sdParts); - tableCache.put(hashKeys[i], sdParts.containingTable); - results.set(i, sdParts.containingTable); - } - } - return results; - } - - /** - * Get a list of tables. - * @param dbName Database these tables are in - * @param regex Regular expression to use in searching for table names. It is expected to - * be a Java regular expression. If it is null then all tables in the indicated - * database will be returned. - * @return list of tables matching the regular expression. - * @throws IOException - */ - List
scanTables(String dbName, String regex) throws IOException { - // There's no way to know whether all the tables we are looking for are - // in the cache, so we would need to scan one way or another. Thus there's no value in hitting - // the cache for this function. - byte[] keyPrefix = null; - if (dbName != null) { - keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName); - } - Filter filter = null; - if (regex != null) { - filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex)); - } - Iterator iter = - scan(TABLE_TABLE, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), - CATALOG_CF, CATALOG_COL, filter); - List
tables = new ArrayList<>(); - while (iter.hasNext()) { - Result result = iter.next(); - HBaseUtils.StorageDescriptorParts sdParts = - HBaseUtils.deserializeTable(result.getRow(), result.getValue(CATALOG_CF, CATALOG_COL)); - StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); - HBaseUtils.assembleStorageDescriptor(sd, sdParts); - tables.add(sdParts.containingTable); - } - return tables; - } - - /** - * Put a table object. This should only be called when the table is new (create table) as it - * will blindly add/increment the storage descriptor. If you are altering an existing table - * call {@link #replaceTable} instead. - * @param table table object - * @throws IOException - */ - void putTable(Table table) throws IOException { - byte[] hash = putStorageDescriptor(table.getSd()); - byte[][] serialized = HBaseUtils.serializeTable(table, hash); - store(TABLE_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); - tableCache.put(new ObjectPair<>(table.getDbName(), table.getTableName()), table); - } - - /** - * Replace an existing table. This will also compare the storage descriptors and see if the - * reference count needs to be adjusted - * @param oldTable old version of the table - * @param newTable new version of the table - */ - void replaceTable(Table oldTable, Table newTable) throws IOException { - byte[] hash; - byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldTable.getSd(), md); - byte[] newHash = HBaseUtils.hashStorageDescriptor(newTable.getSd(), md); - if (Arrays.equals(oldHash, newHash)) { - hash = oldHash; - } else { - decrementStorageDescriptorRefCount(oldTable.getSd()); - hash = putStorageDescriptor(newTable.getSd()); - } - byte[][] serialized = HBaseUtils.serializeTable(newTable, hash); - store(TABLE_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); - tableCache.put(new ObjectPair<>(newTable.getDbName(), newTable.getTableName()), newTable); - if (!oldTable.getTableName().equals(newTable.getTableName())) { - deleteTable(oldTable.getDbName(), oldTable.getTableName()); - } - } - - /** - * Delete a table - * @param dbName name of database table is in - * @param tableName table to drop - * @throws IOException - */ - void deleteTable(String dbName, String tableName) throws IOException { - deleteTable(dbName, tableName, true); - } - - /** - * Print out a table. - * @param name The name for the table. This must include dbname.tablename - * @return string containing the table - * @throws IOException - * @throws TException - */ - String printTable(String name) throws IOException, TException { - byte[] key = HBaseUtils.buildKey(name); - @SuppressWarnings("deprecation") - HTableInterface htab = conn.getHBaseTable(TABLE_TABLE); - Get g = new Get(key); - g.addColumn(CATALOG_CF, CATALOG_COL); - g.addFamily(STATS_CF); - Result result = htab.get(g); - if (result.isEmpty()) return noSuch(name, "table"); - return printOneTable(result); - } - - /** - * Print tables - * @param regex to use to find the tables. Remember that dbname is in each - * table name. 
- * @return tables as strings - * @throws IOException - * @throws TException - */ - List printTables(String regex) throws IOException, TException { - Filter filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex)); - @SuppressWarnings("deprecation") - HTableInterface htab = conn.getHBaseTable(TABLE_TABLE); - Scan scan = new Scan(); - scan.addColumn(CATALOG_CF, CATALOG_COL); - scan.addFamily(STATS_CF); - scan.setFilter(filter); - Iterator iter = htab.getScanner(scan).iterator(); - if (!iter.hasNext()) return noMatch(regex, "table"); - List lines = new ArrayList<>(); - while (iter.hasNext()) { - lines.add(printOneTable(iter.next())); - } - return lines; - } - - int getTableCount() throws IOException { - Filter fil = new FirstKeyOnlyFilter(); - Iterator iter = scan(TABLE_TABLE, fil); - return Iterators.size(iter); - } - - private String printOneTable(Result result) throws IOException, TException { - byte[] key = result.getRow(); - HBaseUtils.StorageDescriptorParts sdParts = - HBaseUtils.deserializeTable(key, result.getValue(CATALOG_CF, CATALOG_COL)); - StringBuilder builder = new StringBuilder(); - builder.append(dumpThriftObject(sdParts.containingTable)) - .append(" sdHash: ") - .append(Base64.encodeBase64URLSafeString(sdParts.sdHash)) - .append(" stats:"); - NavigableMap statsCols = result.getFamilyMap(STATS_CF); - for (Map.Entry statsCol : statsCols.entrySet()) { - builder.append(" column ") - .append(new String(statsCol.getKey(), HBaseUtils.ENCODING)) - .append(": "); - ColumnStatistics pcs = buildColStats(key, true); - ColumnStatisticsObj cso = HBaseUtils.deserializeStatsForOneColumn(pcs, statsCol.getValue()); - builder.append(dumpThriftObject(cso)); - } - // Add the primary key - List pk = getPrimaryKey(sdParts.containingTable.getDbName(), - sdParts.containingTable.getTableName()); - if (pk != null && pk.size() > 0) { - builder.append(" primary key: "); - for (SQLPrimaryKey pkcol : pk) builder.append(dumpThriftObject(pkcol)); - } - - // Add any foreign keys - List fks = getForeignKeys(sdParts.containingTable.getDbName(), - sdParts.containingTable.getTableName()); - if (fks != null && fks.size() > 0) { - builder.append(" foreign keys: "); - for (SQLForeignKey fkcol : fks) builder.append(dumpThriftObject(fkcol)); - - } - return builder.toString(); - } - - private void deleteTable(String dbName, String tableName, boolean decrementRefCnt) - throws IOException { - tableCache.remove(new ObjectPair<>(dbName, tableName)); - if (decrementRefCnt) { - // Find the table so I can get the storage descriptor and drop it - Table t = getTable(dbName, tableName, false); - decrementStorageDescriptorRefCount(t.getSd()); - } - byte[] key = HBaseUtils.buildKey(dbName, tableName); - delete(TABLE_TABLE, key, null, null); - } - - private Table getTable(String dbName, String tableName, boolean populateCache) - throws IOException { - ObjectPair hashKey = new ObjectPair<>(dbName, tableName); - Table cached = tableCache.get(hashKey); - if (cached != null) return cached; - byte[] key = HBaseUtils.buildKey(dbName, tableName); - byte[] serialized = read(TABLE_TABLE, key, CATALOG_CF, CATALOG_COL); - if (serialized == null) return null; - HBaseUtils.StorageDescriptorParts sdParts = - HBaseUtils.deserializeTable(dbName, tableName, serialized); - StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); - HBaseUtils.assembleStorageDescriptor(sd, sdParts); - if (populateCache) tableCache.put(hashKey, sdParts.containingTable); - return sdParts.containingTable; - } - - 
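Editor's note: the private getTable removed above is a read-through cache in miniature: try the in-memory map keyed by (dbName, tableName), fall back to an HBase get, reassemble the shared storage descriptor, and optionally populate the cache on the way out. A simplified, hedged version of that pattern is sketched below, with a plain HashMap standing in for the real bounded cache and an opaque loader standing in for the HBase read and deserialization.

```java
import java.io.IOException;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.HashMap;
import java.util.Map;

public class ReadThroughCacheSketch<V> {
  /** Loader abstracts the HBase get + deserialize + assemble-storage-descriptor step. */
  public interface Loader<T> {
    T load(String dbName, String tableName) throws IOException;
  }

  // A plain map stands in for the size-bounded cache the metastore actually used.
  private final Map<Map.Entry<String, String>, V> cache = new HashMap<>();
  private final Loader<V> loader;

  public ReadThroughCacheSketch(Loader<V> loader) {
    this.loader = loader;
  }

  public V get(String dbName, String tableName, boolean populateCache) throws IOException {
    Map.Entry<String, String> key = new SimpleImmutableEntry<>(dbName, tableName);
    V cached = cache.get(key);
    if (cached != null) return cached;          // cache hit: no HBase round trip
    V loaded = loader.load(dbName, tableName);  // cache miss: go to the backing store
    if (loaded != null && populateCache) cache.put(key, loaded);
    return loaded;
  }

  public void invalidate(String dbName, String tableName) {
    cache.remove(new SimpleImmutableEntry<>(dbName, tableName));
  }
}
```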
/********************************************************************************************** - * Index related methods - *********************************************************************************************/ - - /** - * Put an index object. This should only be called when the index is new (create index) as it - * will blindly add/increment the storage descriptor. If you are altering an existing index - * call {@link #replaceIndex} instead. - * @param index index object - * @throws IOException - */ - void putIndex(Index index) throws IOException { - byte[] hash = putStorageDescriptor(index.getSd()); - byte[][] serialized = HBaseUtils.serializeIndex(index, hash); - store(INDEX_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); - } - - /** - * Fetch an index object - * @param dbName database the table is in - * @param origTableName original table name - * @param indexName index name - * @return Index object, or null if no such index - * @throws IOException - */ - Index getIndex(String dbName, String origTableName, String indexName) throws IOException { - byte[] key = HBaseUtils.buildKey(dbName, origTableName, indexName); - byte[] serialized = read(INDEX_TABLE, key, CATALOG_CF, CATALOG_COL); - if (serialized == null) return null; - HBaseUtils.StorageDescriptorParts sdParts = - HBaseUtils.deserializeIndex(dbName, origTableName, indexName, serialized); - StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); - HBaseUtils.assembleStorageDescriptor(sd, sdParts); - return sdParts.containingIndex; - } - - /** - * Delete an index - * @param dbName name of database the index is in - * @param origTableName table the index is built on - * @param indexName index name - * @throws IOException - */ - void deleteIndex(String dbName, String origTableName, String indexName) throws IOException { - deleteIndex(dbName, origTableName, indexName, true); - } - - void deleteIndex(String dbName, String origTableName, String indexName, boolean decrementRefCnt) - throws IOException { - // Find the index so I can get the storage descriptor and drop it - if (decrementRefCnt) { - Index index = getIndex(dbName, origTableName, indexName); - decrementStorageDescriptorRefCount(index.getSd()); - } - byte[] key = HBaseUtils.buildKey(dbName, origTableName, indexName); - delete(INDEX_TABLE, key, null, null); - } - - /** - * Get a list of indexes. - * @param dbName Database these indexes are in - * @param origTableName original table name - * @param maxResults max indexes to fetch. If negative all indexes will be returned. - * @return list of indexes of the table - * @throws IOException - */ - List scanIndexes(String dbName, String origTableName, int maxResults) throws IOException { - // There's no way to know whether all the indexes we are looking for are - // in the cache, so we would need to scan one way or another. Thus there's no value in hitting - // the cache for this function. - byte[] keyPrefix = null; - if (dbName != null) { - keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName, origTableName); - } - Iterator iter = scan(INDEX_TABLE, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), - CATALOG_CF, CATALOG_COL, null); - List indexes = new ArrayList<>(); - int numToFetch = maxResults < 0 ?
Integer.MAX_VALUE : maxResults; - for (int i = 0; i < numToFetch && iter.hasNext(); i++) { - Result result = iter.next(); - HBaseUtils.StorageDescriptorParts sdParts = HBaseUtils.deserializeIndex(result.getRow(), - result.getValue(CATALOG_CF, CATALOG_COL)); - StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); - HBaseUtils.assembleStorageDescriptor(sd, sdParts); - indexes.add(sdParts.containingIndex); - } - return indexes; - } - - /** - * Replace an existing index. This will also compare the storage descriptors and see if the - * reference count needs to be adjusted - * @param oldIndex old version of the index - * @param newIndex new version of the index - */ - void replaceIndex(Index oldIndex, Index newIndex) throws IOException { - byte[] hash; - byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldIndex.getSd(), md); - byte[] newHash = HBaseUtils.hashStorageDescriptor(newIndex.getSd(), md); - if (Arrays.equals(oldHash, newHash)) { - hash = oldHash; - } else { - decrementStorageDescriptorRefCount(oldIndex.getSd()); - hash = putStorageDescriptor(newIndex.getSd()); - } - byte[][] serialized = HBaseUtils.serializeIndex(newIndex, hash); - store(INDEX_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); - if (!(oldIndex.getDbName().equals(newIndex.getDbName()) && - oldIndex.getOrigTableName().equals(newIndex.getOrigTableName()) && - oldIndex.getIndexName().equals(newIndex.getIndexName()))) { - deleteIndex(oldIndex.getDbName(), oldIndex.getOrigTableName(), oldIndex.getIndexName(), false); - } - } - /********************************************************************************************** - * StorageDescriptor related methods - *********************************************************************************************/ - - /** - * If this serde has already been read, then return it from the cache. If not, read it, then - * return it. - * @param hash hash of the storage descriptor to read - * @return the storage descriptor - * @throws IOException - */ - StorageDescriptor getStorageDescriptor(byte[] hash) throws IOException { - ByteArrayWrapper hashKey = new ByteArrayWrapper(hash); - StorageDescriptor cached = sdCache.get(hashKey); - if (cached != null) return cached; - LOG.debug("Not found in cache, looking in hbase"); - byte[] serialized = read(SD_TABLE, hash, CATALOG_CF, CATALOG_COL); - if (serialized == null) { - throw new RuntimeException("Woh, bad! Trying to fetch a non-existent storage descriptor " + - "from hash " + Base64.encodeBase64String(hash)); - } - StorageDescriptor sd = HBaseUtils.deserializeStorageDescriptor(serialized); - sdCache.put(hashKey, sd); - return sd; - } - - /** - * Lower the reference count on the storage descriptor by one. If it goes to zero, then it - * will be deleted. - * @param sd Storage descriptor - * @throws IOException - */ - void decrementStorageDescriptorRefCount(StorageDescriptor sd) throws IOException { - byte[] key = HBaseUtils.hashStorageDescriptor(sd, md); - byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL); - if (serializedRefCnt == null) { - // Someone deleted it before we got to it, no worries - return; - } - int refCnt = Integer.parseInt(new String(serializedRefCnt, HBaseUtils.ENCODING)); - HTableInterface htab = conn.getHBaseTable(SD_TABLE); - if (--refCnt < 1) { - Delete d = new Delete(key); - // We don't use checkAndDelete here because it isn't compatible with the transaction - // managers. If the transaction managers are doing their jobs then we should not need it - // anyway. 
- htab.delete(d); - sdCache.remove(new ByteArrayWrapper(key)); - } else { - Put p = new Put(key); - p.add(CATALOG_CF, REF_COUNT_COL, Integer.toString(refCnt).getBytes(HBaseUtils.ENCODING)); - htab.put(p); - conn.flush(htab); - } - } - - /** - * Place the common parts of a storage descriptor into the cache and write the storage - * descriptor out to HBase. This should only be called if you are sure that the storage - * descriptor needs to be added. If you have changed a table or partition but not it's storage - * descriptor do not call this method, as it will increment the reference count of the storage - * descriptor. - * @param storageDescriptor storage descriptor to store. - * @return id of the entry in the cache, to be written in for the storage descriptor - */ - byte[] putStorageDescriptor(StorageDescriptor storageDescriptor) throws IOException { - byte[] sd = HBaseUtils.serializeStorageDescriptor(storageDescriptor); - byte[] key = HBaseUtils.hashStorageDescriptor(storageDescriptor, md); - byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL); - HTableInterface htab = conn.getHBaseTable(SD_TABLE); - if (serializedRefCnt == null) { - // We are the first to put it in the DB - Put p = new Put(key); - p.add(CATALOG_CF, CATALOG_COL, sd); - p.add(CATALOG_CF, REF_COUNT_COL, "1".getBytes(HBaseUtils.ENCODING)); - htab.put(p); - sdCache.put(new ByteArrayWrapper(key), storageDescriptor); - } else { - // Just increment the reference count - int refCnt = Integer.parseInt(new String(serializedRefCnt, HBaseUtils.ENCODING)) + 1; - Put p = new Put(key); - p.add(CATALOG_CF, REF_COUNT_COL, Integer.toString(refCnt).getBytes(HBaseUtils.ENCODING)); - htab.put(p); - } - conn.flush(htab); - return key; - } - - /** - * Print out a storage descriptor. - * @param hash hash that is the key of the storage descriptor - * @return string version of the storage descriptor - */ - String printStorageDescriptor(byte[] hash) throws IOException, TException { - byte[] serialized = read(SD_TABLE, hash, CATALOG_CF, CATALOG_COL); - if (serialized == null) return noSuch(Base64.encodeBase64URLSafeString(hash), "storage descriptor"); - return dumpThriftObject(HBaseUtils.deserializeStorageDescriptor(serialized)); - } - - /** - * Print all of the storage descriptors. This doesn't take a regular expression since the key - * is an md5 hash and it's hard to see how a regex on this would be useful. 
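Editor's note: putStorageDescriptor and decrementStorageDescriptorRefCount above implement simple content-addressed deduplication: the row key is a hash of the serialized descriptor, a sibling ref-count column tracks how many tables and partitions share it, and the row is deleted only when the count reaches zero. The sketch below captures that bookkeeping against an in-memory map, under the stated assumption that serialization is handled elsewhere and MD5 stands in for whatever digest the caller supplies.

```java
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.Map;

public class RefCountedStoreSketch {
  private static final class Entry {
    byte[] payload;
    int refCount;
  }

  // In-memory stand-in for the SD table: key = hex hash, value = payload + ref count.
  private final Map<String, Entry> store = new HashMap<>();

  /** Store (or re-reference) a descriptor; returns the hash used as its key. */
  String put(byte[] serializedDescriptor) throws NoSuchAlgorithmException {
    String key = hash(serializedDescriptor);
    Entry e = store.get(key);
    if (e == null) {                 // first writer creates the row with refCount = 1
      e = new Entry();
      e.payload = serializedDescriptor;
      e.refCount = 1;
      store.put(key, e);
    } else {
      e.refCount++;                  // later writers only bump the counter
    }
    return key;
  }

  /** Drop one reference; the entry disappears once nobody points at it any more. */
  void decrement(byte[] serializedDescriptor) throws NoSuchAlgorithmException {
    String key = hash(serializedDescriptor);
    Entry e = store.get(key);
    if (e == null) return;           // already gone, nothing to do
    if (--e.refCount < 1) store.remove(key);
  }

  private static String hash(byte[] bytes) throws NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("MD5");
    StringBuilder sb = new StringBuilder();
    for (byte b : md.digest(bytes)) sb.append(String.format("%02x", b & 0xff));
    return sb.toString();
  }
}
```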
- * @return list of all storage descriptors as strings - * @throws IOException - * @throws TException - */ - List printStorageDescriptors() throws IOException, TException { - Iterator results = scan(SD_TABLE, CATALOG_CF, CATALOG_COL); - if (!results.hasNext()) return Arrays.asList("No storage descriptors"); - List lines = new ArrayList<>(); - while (results.hasNext()) { - Result result = results.next(); - lines.add(Base64.encodeBase64URLSafeString(result.getRow()) + ": " + - dumpThriftObject(HBaseUtils.deserializeStorageDescriptor(result.getValue(CATALOG_CF, - CATALOG_COL)))); - } - return lines; - } - - private static class ByteArrayWrapper { - byte[] wrapped; - - ByteArrayWrapper(byte[] b) { - wrapped = b; - } - - @Override - public boolean equals(Object other) { - if (other instanceof ByteArrayWrapper) { - return Arrays.equals(((ByteArrayWrapper)other).wrapped, wrapped); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Arrays.hashCode(wrapped); - } - } - - /********************************************************************************************** - * Statistics related methods - *********************************************************************************************/ - - /** - * Update statistics for one or more columns for a table or a partition. - * - * @param dbName database the table is in - * @param tableName table to update statistics for - * @param partVals partition values that define partition to update statistics for. If this is - * null, then these will be assumed to be table level statistics - * @param stats Stats object with stats for one or more columns - * @throws IOException - */ - void updateStatistics(String dbName, String tableName, List partVals, - ColumnStatistics stats) throws IOException { - byte[] key = getStatisticsKey(dbName, tableName, partVals); - String hbaseTable = getStatisticsTable(partVals); - byte[][] colnames = new byte[stats.getStatsObjSize()][]; - byte[][] serialized = new byte[stats.getStatsObjSize()][]; - for (int i = 0; i < stats.getStatsObjSize(); i++) { - ColumnStatisticsObj obj = stats.getStatsObj().get(i); - serialized[i] = HBaseUtils.serializeStatsForOneColumn(stats, obj); - String colname = obj.getColName(); - colnames[i] = HBaseUtils.buildKey(colname); - } - store(hbaseTable, key, STATS_CF, colnames, serialized); - } - - /** - * Get statistics for a table - * - * @param dbName name of database table is in - * @param tblName name of table - * @param colNames list of column names to get statistics for - * @return column statistics for indicated table - * @throws IOException - */ - ColumnStatistics getTableStatistics(String dbName, String tblName, List colNames) - throws IOException { - byte[] tabKey = HBaseUtils.buildKey(dbName, tblName); - ColumnStatistics tableStats = new ColumnStatistics(); - ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); - statsDesc.setIsTblLevel(true); - statsDesc.setDbName(dbName); - statsDesc.setTableName(tblName); - tableStats.setStatsDesc(statsDesc); - byte[][] colKeys = new byte[colNames.size()][]; - for (int i = 0; i < colKeys.length; i++) { - colKeys[i] = HBaseUtils.buildKey(colNames.get(i)); - } - Result result = read(TABLE_TABLE, tabKey, STATS_CF, colKeys); - for (int i = 0; i < colKeys.length; i++) { - byte[] serializedColStats = result.getValue(STATS_CF, colKeys[i]); - if (serializedColStats == null) { - // There were no stats for this column, so skip it - continue; - } - ColumnStatisticsObj obj = - HBaseUtils.deserializeStatsForOneColumn(tableStats, 
serializedColStats); - obj.setColName(colNames.get(i)); - tableStats.addToStatsObj(obj); - } - return tableStats; - } - - /** - * Get statistics for a set of partitions - * - * @param dbName name of database table is in - * @param tblName table partitions are in - * @param partNames names of the partitions, used only to set values inside the return stats - * objects - * @param partVals partition values for each partition, needed because this class doesn't know how - * to translate from partName to partVals - * @param colNames column names to fetch stats for. These columns will be fetched for all - * requested partitions - * @return list of ColumnStats, one for each partition for which we found at least one column's - * stats. - * @throws IOException - */ - List getPartitionStatistics(String dbName, String tblName, - List partNames, List> partVals, List colNames) - throws IOException { - List statsList = new ArrayList<>(partNames.size()); - Map, String> valToPartMap = new HashMap<>(partNames.size()); - List gets = new ArrayList<>(partNames.size() * colNames.size()); - assert partNames.size() == partVals.size(); - - byte[][] colNameBytes = new byte[colNames.size()][]; - for (int i = 0; i < colNames.size(); i++) { - colNameBytes[i] = HBaseUtils.buildKey(colNames.get(i)); - } - - for (int i = 0; i < partNames.size(); i++) { - valToPartMap.put(partVals.get(i), partNames.get(i)); - byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName, - HBaseUtils.getPartitionKeyTypes(getTable(dbName, tblName).getPartitionKeys()), - partVals.get(i)); - Get get = new Get(partKey); - for (byte[] colName : colNameBytes) { - get.addColumn(STATS_CF, colName); - } - gets.add(get); - } - - HTableInterface htab = conn.getHBaseTable(PART_TABLE); - Result[] results = htab.get(gets); - for (int i = 0; i < results.length; i++) { - ColumnStatistics colStats = null; - for (int j = 0; j < colNameBytes.length; j++) { - byte[] serializedColStats = results[i].getValue(STATS_CF, colNameBytes[j]); - if (serializedColStats != null) { - if (colStats == null) { - // We initialize this late so that we don't create extras in the case of - // partitions with no stats - colStats = buildColStats(results[i].getRow(), false); - statsList.add(colStats); - } - ColumnStatisticsObj cso = - HBaseUtils.deserializeStatsForOneColumn(colStats, serializedColStats); - cso.setColName(colNames.get(j)); - colStats.addToStatsObj(cso); - } - } - } - - return statsList; - } - - /** - * Get a reference to the stats cache. - * @return the stats cache. - */ - StatsCache getStatsCache() { - return statsCache; - } - - /** - * Get aggregated stats. Only intended for use by - * {@link org.apache.hadoop.hive.metastore.hbase.StatsCache}. Others should not call directly - * but should call StatsCache.get instead. - * @param key The md5 hash associated with this partition set - * @return stats if hbase has them, else null - * @throws IOException - */ - AggrStats getAggregatedStats(byte[] key) throws IOException{ - byte[] serialized = read(AGGR_STATS_TABLE, key, CATALOG_CF, AGGR_STATS_STATS_COL); - if (serialized == null) return null; - return HBaseUtils.deserializeAggrStats(serialized); - - } - - /** - * Put aggregated stats Only intended for use by - * {@link org.apache.hadoop.hive.metastore.hbase.StatsCache}. Others should not call directly - * but should call StatsCache.put instead. 
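Editor's note: updateStatistics and the two stats read paths above rely on one row per table or partition with a dedicated stats column family and one qualifier per column name, so writing stats for three columns is a single Put with three cells and reading them back is a single Get naming just the columns of interest. A hedged HBase sketch of that layout follows; the family name and the opaque per-column serialization are assumptions, not the deleted code's actual schema.

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnStatsSketch {
  private static final byte[] STATS_CF = Bytes.toBytes("s");  // assumed stats family name

  /** Write one serialized stats blob per column as separate qualifiers on the same row. */
  static void writeStats(Connection conn, String table, byte[] rowKey,
                         Map<String, byte[]> statsPerColumn) throws IOException {
    Put put = new Put(rowKey);
    for (Map.Entry<String, byte[]> e : statsPerColumn.entrySet()) {
      put.addColumn(STATS_CF, Bytes.toBytes(e.getKey()), e.getValue());
    }
    try (Table htab = conn.getTable(TableName.valueOf(table))) {
      htab.put(put);   // one cell per column, one call per row
    }
  }

  /** Read back only the requested columns' stats; absent columns simply have no cell. */
  static Map<String, byte[]> readStats(Connection conn, String table, byte[] rowKey,
                                       Iterable<String> colNames) throws IOException {
    Get get = new Get(rowKey);
    for (String col : colNames) {
      get.addColumn(STATS_CF, Bytes.toBytes(col));
    }
    Map<String, byte[]> out = new HashMap<>();
    try (Table htab = conn.getTable(TableName.valueOf(table))) {
      Result result = htab.get(get);
      for (String col : colNames) {
        byte[] v = result.getValue(STATS_CF, Bytes.toBytes(col));
        if (v != null) out.put(col, v);
      }
    }
    return out;
  }
}
```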
- * @param key The md5 hash associated with this partition set - * @param dbName Database these partitions are in - * @param tableName Table these partitions are in - * @param partNames Partition names - * @param colName Column stats are for - * @param stats Stats - * @throws IOException - */ - void putAggregatedStats(byte[] key, String dbName, String tableName, List partNames, - String colName, AggrStats stats) throws IOException { - // Serialize the part names - List protoNames = new ArrayList<>(partNames.size() + 3); - protoNames.add(dbName); - protoNames.add(tableName); - protoNames.add(colName); - protoNames.addAll(partNames); - // Build a bloom Filter for these partitions - BloomFilter bloom = new BloomFilter(partNames.size(), STATS_BF_ERROR_RATE); - for (String partName : partNames) { - bloom.add(partName.getBytes(HBaseUtils.ENCODING)); - } - byte[] serializedFilter = HBaseUtils.serializeBloomFilter(dbName, tableName, bloom); - - byte[] serializedStats = HBaseUtils.serializeAggrStats(stats); - store(AGGR_STATS_TABLE, key, CATALOG_CF, - new byte[][]{AGGR_STATS_BLOOM_COL, AGGR_STATS_STATS_COL}, - new byte[][]{serializedFilter, serializedStats}); - } - - // TODO - We shouldn't remove an entry from the cache as soon as a single partition is deleted. - // TODO - Instead we should keep track of how many partitions have been deleted and only remove - // TODO - an entry once it passes a certain threshold, like 5%, of partitions have been removed. - // TODO - That requires moving this from a filter to a co-processor. - /** - * Invalidate stats associated with the listed partitions. This method is intended for use - * only by {@link org.apache.hadoop.hive.metastore.hbase.StatsCache}. - * @param filter serialized version of the filter to pass - * @return List of md5 hash keys for the partition stat sets that were removed. - * @throws IOException - */ - List - invalidateAggregatedStats(HbaseMetastoreProto.AggrStatsInvalidatorFilter filter) - throws IOException { - Iterator results = scan(AGGR_STATS_TABLE, new AggrStatsInvalidatorFilter(filter)); - if (!results.hasNext()) return Collections.emptyList(); - List deletes = new ArrayList<>(); - List keys = new ArrayList<>(); - while (results.hasNext()) { - Result result = results.next(); - deletes.add(new Delete(result.getRow())); - keys.add(new StatsCache.StatsCacheKey(result.getRow())); - } - HTableInterface htab = conn.getHBaseTable(AGGR_STATS_TABLE); - htab.delete(deletes); - return keys; - } - - private byte[] getStatisticsKey(String dbName, String tableName, List partVals) throws IOException { - return partVals == null ? HBaseUtils.buildKey(dbName, tableName) : HBaseUtils - .buildPartitionKey(dbName, tableName, - HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), - partVals); - } - - private String getStatisticsTable(List partVals) { - return partVals == null ? 
TABLE_TABLE : PART_TABLE; - } - - private ColumnStatistics buildColStats(byte[] key, boolean fromTable) throws IOException { - // We initialize this late so that we don't create extras in the case of - // partitions with no stats - ColumnStatistics colStats = new ColumnStatistics(); - ColumnStatisticsDesc csd = new ColumnStatisticsDesc(); - - // If this is a table key, parse it as one - List reconstructedKey; - if (fromTable) { - reconstructedKey = Arrays.asList(HBaseUtils.deserializeKey(key)); - csd.setIsTblLevel(true); - } else { - reconstructedKey = HBaseUtils.deserializePartitionKey(key, this); - csd.setIsTblLevel(false); - } - csd.setDbName(reconstructedKey.get(0)); - csd.setTableName(reconstructedKey.get(1)); - if (!fromTable) { - // Build the part name, for which we need the table - Table table = getTable(reconstructedKey.get(0), reconstructedKey.get(1)); - if (table == null) { - throw new RuntimeException("Unable to find table " + reconstructedKey.get(0) + "." + - reconstructedKey.get(1) + " even though I have a partition for it!"); - } - csd.setPartName(HBaseStore.buildExternalPartName(table, reconstructedKey.subList(2, - reconstructedKey.size()))); - } - colStats.setStatsDesc(csd); - return colStats; - } - - /********************************************************************************************** - * File metadata related methods - *********************************************************************************************/ - - /** - * @param fileIds file ID list. - * @return Serialized file metadata. - */ - ByteBuffer[] getFileMetadata(List fileIds) throws IOException { - ByteBuffer[] result = new ByteBuffer[fileIds.size()]; - getFileMetadata(fileIds, result); - return result; - } - - /** - * @param fileIds file ID list. - */ - @Override - public void getFileMetadata(List fileIds, ByteBuffer[] result) throws IOException { - byte[][] keys = new byte[fileIds.size()][]; - for (int i = 0; i < fileIds.size(); ++i) { - keys[i] = HBaseUtils.makeLongKey(fileIds.get(i)); - } - multiRead(FILE_METADATA_TABLE, CATALOG_CF, CATALOG_COL, keys, result); - } - - /** - * @param fileIds file ID list. - * @param metadataBuffers Serialized file metadatas, one per file ID. - * @param addedCols The column names for additional columns created by file-format-specific - * metadata handler, to be stored in the cache. - * @param addedVals The values for addedCols; one value per file ID per added column. - */ - @Override - public void storeFileMetadata(List fileIds, List metadataBuffers, - ByteBuffer[] addedCols, ByteBuffer[][] addedVals) - throws IOException, InterruptedException { - byte[][] keys = new byte[fileIds.size()][]; - for (int i = 0; i < fileIds.size(); ++i) { - keys[i] = HBaseUtils.makeLongKey(fileIds.get(i)); - } - // HBase APIs are weird. To supply bytebuffer value, you have to also have bytebuffer - // column name, but not column family. So there. Perhaps we should add these to constants too. - ByteBuffer colNameBuf = ByteBuffer.wrap(CATALOG_COL); - @SuppressWarnings("deprecation") - HTableInterface htab = conn.getHBaseTable(FILE_METADATA_TABLE); - List actions = new ArrayList<>(keys.length); - for (int keyIx = 0; keyIx < keys.length; ++keyIx) { - ByteBuffer value = (metadataBuffers != null) ? metadataBuffers.get(keyIx) : null; - ByteBuffer[] av = addedVals == null ? 
null : addedVals[keyIx]; - if (value == null) { - actions.add(new Delete(keys[keyIx])); - assert av == null; - } else { - Put p = new Put(keys[keyIx]); - p.addColumn(CATALOG_CF, colNameBuf, HConstants.LATEST_TIMESTAMP, value); - if (av != null) { - assert av.length == addedCols.length; - for (int colIx = 0; colIx < addedCols.length; ++colIx) { - p.addColumn(STATS_CF, addedCols[colIx], HConstants.LATEST_TIMESTAMP, av[colIx]); - } - } - actions.add(p); - } - } - Object[] results = new Object[keys.length]; - htab.batch(actions, results); - // TODO: should we check results array? we don't care about partial results - conn.flush(htab); - } - - @Override - public void storeFileMetadata(long fileId, ByteBuffer metadata, - ByteBuffer[] addedCols, ByteBuffer[] addedVals) throws IOException, InterruptedException { - @SuppressWarnings("deprecation") - HTableInterface htab = conn.getHBaseTable(FILE_METADATA_TABLE); - Put p = new Put(HBaseUtils.makeLongKey(fileId)); - p.addColumn(CATALOG_CF, ByteBuffer.wrap(CATALOG_COL), HConstants.LATEST_TIMESTAMP, metadata); - assert (addedCols == null && addedVals == null) || (addedCols.length == addedVals.length); - if (addedCols != null) { - for (int i = 0; i < addedCols.length; ++i) { - p.addColumn(STATS_CF, addedCols[i], HConstants.LATEST_TIMESTAMP, addedVals[i]); - } - } - htab.put(p); - conn.flush(htab); - } - - /********************************************************************************************** - * Security related methods - *********************************************************************************************/ - - /** - * Fetch a delegation token - * @param tokId identifier of the token to fetch - * @return the delegation token, or null if there is no such delegation token - * @throws IOException - */ - String getDelegationToken(String tokId) throws IOException { - byte[] key = HBaseUtils.buildKey(tokId); - byte[] serialized = read(SECURITY_TABLE, key, CATALOG_CF, DELEGATION_TOKEN_COL); - if (serialized == null) return null; - return HBaseUtils.deserializeDelegationToken(serialized); - } - - /** - * Get all delegation token ids - * @return list of all delegation token identifiers - * @throws IOException - */ - List scanDelegationTokenIdentifiers() throws IOException { - Iterator iter = scan(SECURITY_TABLE, CATALOG_CF, DELEGATION_TOKEN_COL); - List ids = new ArrayList<>(); - while (iter.hasNext()) { - Result result = iter.next(); - byte[] serialized = result.getValue(CATALOG_CF, DELEGATION_TOKEN_COL); - if (serialized != null) { - // Don't deserialize the value, as what we're after is the key. We just had to check the - // value wasn't null in order to check this is a record with a delegation token and not a - // master key. 
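For orientation on the security table used here: a SECURITY_TABLE row holds either a delegation token or a master key, and the two kinds of row are told apart by which CATALOG_CF column is populated, exactly as the comment above relies on. A minimal sketch of that check (mirroring printSecurity further below):

    // Sketch: master-key rows populate MASTER_KEY_COL; delegation-token rows populate DELEGATION_TOKEN_COL.
    byte[] mk = result.getValue(CATALOG_CF, MASTER_KEY_COL);
    if (mk != null) {
      // master key row: the row key is the sequence number as a string
      int seqNo = Integer.parseInt(new String(result.getRow(), HBaseUtils.ENCODING));
    } else {
      // delegation token row: the row key is the token identifier
      byte[] tok = result.getValue(CATALOG_CF, DELEGATION_TOKEN_COL);
    }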
- ids.add(new String(result.getRow(), HBaseUtils.ENCODING)); - - } - } - return ids; - } - - /** - * Store a delegation token - * @param tokId token id - * @param token delegation token to store - * @throws IOException - */ - void putDelegationToken(String tokId, String token) throws IOException { - byte[][] serialized = HBaseUtils.serializeDelegationToken(tokId, token); - store(SECURITY_TABLE, serialized[0], CATALOG_CF, DELEGATION_TOKEN_COL, serialized[1]); - } - - /** - * Delete a delegation token - * @param tokId identifier of token to drop - * @throws IOException - */ - void deleteDelegationToken(String tokId) throws IOException { - byte[] key = HBaseUtils.buildKey(tokId); - delete(SECURITY_TABLE, key, CATALOG_CF, DELEGATION_TOKEN_COL); - } - - /** - * Fetch a master key - * @param seqNo sequence number of the master key - * @return the master key, or null if there is no such master key - * @throws IOException - */ - String getMasterKey(Integer seqNo) throws IOException { - byte[] key = HBaseUtils.buildKey(seqNo.toString()); - byte[] serialized = read(SECURITY_TABLE, key, CATALOG_CF, MASTER_KEY_COL); - if (serialized == null) return null; - return HBaseUtils.deserializeMasterKey(serialized); - } - - /** - * Get all master keys - * @return list of all master keys - * @throws IOException - */ - List scanMasterKeys() throws IOException { - Iterator iter = scan(SECURITY_TABLE, CATALOG_CF, MASTER_KEY_COL); - List keys = new ArrayList<>(); - while (iter.hasNext()) { - Result result = iter.next(); - byte[] serialized = result.getValue(CATALOG_CF, MASTER_KEY_COL); - if (serialized != null) { - keys.add(HBaseUtils.deserializeMasterKey(serialized)); - - } - } - return keys; - } - - /** - * Store a master key - * @param seqNo sequence number - * @param key master key to store - * @throws IOException - */ - void putMasterKey(Integer seqNo, String key) throws IOException { - byte[][] serialized = HBaseUtils.serializeMasterKey(seqNo, key); - store(SECURITY_TABLE, serialized[0], CATALOG_CF, MASTER_KEY_COL, serialized[1]); - } - - /** - * Delete a master key - * @param seqNo sequence number of master key to delete - * @throws IOException - */ - void deleteMasterKey(Integer seqNo) throws IOException { - byte[] key = HBaseUtils.buildKey(seqNo.toString()); - delete(SECURITY_TABLE, key, CATALOG_CF, MASTER_KEY_COL); - } - - /** - * One method to print all rows in the security table. It's not expected to be large. - * @return each row as one string - * @throws IOException - */ - List printSecurity() throws IOException { - HTableInterface htab = conn.getHBaseTable(SECURITY_TABLE); - Scan scan = new Scan(); - scan.addColumn(CATALOG_CF, MASTER_KEY_COL); - scan.addColumn(CATALOG_CF, DELEGATION_TOKEN_COL); - Iterator iter = htab.getScanner(scan).iterator(); - if (!iter.hasNext()) return Arrays.asList("No security related entries"); - List lines = new ArrayList<>(); - while (iter.hasNext()) { - Result result = iter.next(); - byte[] val = result.getValue(CATALOG_CF, MASTER_KEY_COL); - if (val != null) { - int seqNo = Integer.parseInt(new String(result.getRow(), HBaseUtils.ENCODING)); - lines.add("Master key " + seqNo + ": " + HBaseUtils.deserializeMasterKey(val)); - } else { - val = result.getValue(CATALOG_CF, DELEGATION_TOKEN_COL); - if (val == null) throw new RuntimeException("Huh? 
No master key, no delegation token!"); - lines.add("Delegation token " + new String(result.getRow(), HBaseUtils.ENCODING) + ": " + - HBaseUtils.deserializeDelegationToken(val)); - } - } - return lines; - } - - /********************************************************************************************** - * Sequence methods - *********************************************************************************************/ - - long peekAtSequence(byte[] sequence) throws IOException { - byte[] serialized = read(SEQUENCES_TABLE, sequence, CATALOG_CF, CATALOG_COL); - return serialized == null ? 0 : Long.parseLong(new String(serialized, HBaseUtils.ENCODING)); - } - - long getNextSequence(byte[] sequence) throws IOException { - byte[] serialized = read(SEQUENCES_TABLE, sequence, CATALOG_CF, CATALOG_COL); - long val = 0; - if (serialized != null) { - val = Long.parseLong(new String(serialized, HBaseUtils.ENCODING)); - } - byte[] incrSerialized = new Long(val + 1).toString().getBytes(HBaseUtils.ENCODING); - store(SEQUENCES_TABLE, sequence, CATALOG_CF, CATALOG_COL, incrSerialized); - return val; - } - - /** - * One method to print all entries in the sequence table. It's not expected to be large. - * @return each sequence as one string - * @throws IOException - */ - List printSequences() throws IOException { - HTableInterface htab = conn.getHBaseTable(SEQUENCES_TABLE); - Iterator iter = - scan(SEQUENCES_TABLE, CATALOG_CF, CATALOG_COL, null); - List sequences = new ArrayList<>(); - if (!iter.hasNext()) return Arrays.asList("No sequences"); - while (iter.hasNext()) { - Result result = iter.next(); - sequences.add(new StringBuilder(new String(result.getRow(), HBaseUtils.ENCODING)) - .append(": ") - .append(new String(result.getValue(CATALOG_CF, CATALOG_COL), HBaseUtils.ENCODING)) - .toString()); - } - return sequences; - } - - /********************************************************************************************** - * Constraints related methods - *********************************************************************************************/ - - /** - * Fetch a primary key - * @param dbName database the table is in - * @param tableName table name - * @return List of primary key objects, which together make up one key - * @throws IOException if there's a read error - */ - List getPrimaryKey(String dbName, String tableName) throws IOException { - byte[] key = HBaseUtils.buildKey(dbName, tableName); - byte[] serialized = read(TABLE_TABLE, key, CATALOG_CF, PRIMARY_KEY_COL); - if (serialized == null) return null; - return HBaseUtils.deserializePrimaryKey(dbName, tableName, serialized); - } - - /** - * Fetch a the foreign keys for a table - * @param dbName database the table is in - * @param tableName table name - * @return All of the foreign key columns thrown together in one list. Have fun sorting them out. 
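Stepping back to the sequence helpers above: each counter is kept as a decimal string in a single CATALOG_CF/CATALOG_COL cell, and getNextSequence is a plain read-increment-write rather than an HBase atomic Increment, so it presumably depends on the surrounding transaction handling for safety. A condensed sketch of the same pattern:

    // Sketch: read the current value (0 if absent), write value + 1, return the pre-increment value.
    byte[] cur = read(SEQUENCES_TABLE, sequence, CATALOG_CF, CATALOG_COL);
    long val = cur == null ? 0 : Long.parseLong(new String(cur, HBaseUtils.ENCODING));
    store(SEQUENCES_TABLE, sequence, CATALOG_CF, CATALOG_COL,
        Long.toString(val + 1).getBytes(HBaseUtils.ENCODING));
    return val;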
- * @throws IOException if there's a read error - */ - List getForeignKeys(String dbName, String tableName) throws IOException { - byte[] key = HBaseUtils.buildKey(dbName, tableName); - byte[] serialized = read(TABLE_TABLE, key, CATALOG_CF, FOREIGN_KEY_COL); - if (serialized == null) return null; - return HBaseUtils.deserializeForeignKeys(dbName, tableName, serialized); - } - - /** - * Fetch a unique constraint - * @param dbName database the table is in - * @param tableName table name - * @return List of unique constraints objects - * @throws IOException if there's a read error - */ - List getUniqueConstraint(String dbName, String tableName) throws IOException { - byte[] key = HBaseUtils.buildKey(dbName, tableName); - byte[] serialized = read(TABLE_TABLE, key, CATALOG_CF, UNIQUE_CONSTRAINT_COL); - if (serialized == null) return null; - return HBaseUtils.deserializeUniqueConstraint(dbName, tableName, serialized); - } - - /** - * Fetch a not null constraint - * @param dbName database the table is in - * @param tableName table name - * @return List of not null constraints objects - * @throws IOException if there's a read error - */ - List getNotNullConstraint(String dbName, String tableName) throws IOException { - byte[] key = HBaseUtils.buildKey(dbName, tableName); - byte[] serialized = read(TABLE_TABLE, key, CATALOG_CF, NOT_NULL_CONSTRAINT_COL); - if (serialized == null) return null; - return HBaseUtils.deserializeNotNullConstraint(dbName, tableName, serialized); - } - - /** - * Create a primary key on a table. - * @param pk Primary key for this table - * @throws IOException if unable to write the data to the store. - */ - void putPrimaryKey(List pk) throws IOException { - byte[][] serialized = HBaseUtils.serializePrimaryKey(pk); - store(TABLE_TABLE, serialized[0], CATALOG_CF, PRIMARY_KEY_COL, serialized[1]); - } - - /** - * Create one or more foreign keys on a table. Note that this will not add a foreign key, it - * will overwrite whatever is there. So if you wish to add a key to a table that may already - * foreign keys you need to first use {@link #getForeignKeys(String, String)} to fetch the - * existing keys, add to the list, and then call this. - * @param fks Foreign key(s) for this table - * @throws IOException if unable to write the data to the store. - */ - void putForeignKeys(List fks) throws IOException { - byte[][] serialized = HBaseUtils.serializeForeignKeys(fks); - store(TABLE_TABLE, serialized[0], CATALOG_CF, FOREIGN_KEY_COL, serialized[1]); - } - - /** - * Create one or more unique constraints on a table. - * @param uks Unique constraints for this table - * @throws IOException if unable to write the data to the store. - */ - void putUniqueConstraints(List uks) throws IOException { - byte[][] serialized = HBaseUtils.serializeUniqueConstraints(uks); - store(TABLE_TABLE, serialized[0], CATALOG_CF, UNIQUE_CONSTRAINT_COL, serialized[1]); - } - - /** - * Create one or more not null constraints on a table. - * @param nns Not null constraints for this table - * @throws IOException if unable to write the data to the store. - */ - void putNotNullConstraints(List nns) throws IOException { - byte[][] serialized = HBaseUtils.serializeNotNullConstraints(nns); - store(TABLE_TABLE, serialized[0], CATALOG_CF, NOT_NULL_CONSTRAINT_COL, serialized[1]); - } - - /** - * Drop the primary key from a table. 
- * @param dbName database the table is in - * @param tableName table name - * @throws IOException if unable to delete from the store - */ - void deletePrimaryKey(String dbName, String tableName) throws IOException { - byte[] key = HBaseUtils.buildKey(dbName, tableName); - delete(TABLE_TABLE, key, CATALOG_CF, PRIMARY_KEY_COL); - } - - /** - * Drop all foreign keys from a table. Note that this will drop all keys blindly. You should - * only call this if you're sure you want to drop them all. If you just want to drop one you - * should instead all {@link #getForeignKeys(String, String)}, modify the list it returns, and - * then call {@link #putForeignKeys(List)}. - * @param dbName database the table is in - * @param tableName table name - * @throws IOException if unable to delete from the store - */ - void deleteForeignKeys(String dbName, String tableName) throws IOException { - byte[] key = HBaseUtils.buildKey(dbName, tableName); - delete(TABLE_TABLE, key, CATALOG_CF, FOREIGN_KEY_COL); - } - - /** - * Drop the unique constraint from a table. - * @param dbName database the table is in - * @param tableName table name - * @throws IOException if unable to delete from the store - */ - void deleteUniqueConstraint(String dbName, String tableName) throws IOException { - byte[] key = HBaseUtils.buildKey(dbName, tableName); - delete(TABLE_TABLE, key, CATALOG_CF, UNIQUE_CONSTRAINT_COL); - } - - /** - * Drop the not null constraint from a table. - * @param dbName database the table is in - * @param tableName table name - * @throws IOException if unable to delete from the store - */ - void deleteNotNullConstraint(String dbName, String tableName) throws IOException { - byte[] key = HBaseUtils.buildKey(dbName, tableName); - delete(TABLE_TABLE, key, CATALOG_CF, NOT_NULL_CONSTRAINT_COL); - } - - /********************************************************************************************** - * Cache methods - *********************************************************************************************/ - - /** - * This should be called whenever a new query is started. 
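Because foreign keys are stored as one serialized value per table, the javadoc above points callers at a read-modify-write cycle for partial changes; a hedged sketch of dropping just one key that way (fkNameToDrop and the getFk_name() accessor are illustrative assumptions, not names from this file):

    // Sketch: fetch all foreign keys, keep everything except the one being dropped, write the rest back.
    List<SQLForeignKey> fks = getForeignKeys(dbName, tableName);
    if (fks != null) {
      List<SQLForeignKey> remaining = new ArrayList<>();
      for (SQLForeignKey fk : fks) {
        if (!fkNameToDrop.equals(fk.getFk_name())) {
          remaining.add(fk);
        }
      }
      if (remaining.isEmpty()) {
        deleteForeignKeys(dbName, tableName);   // nothing left, drop the stored column entirely
      } else {
        putForeignKeys(remaining);              // overwrites the previously stored list
      }
    }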
- */ - void flushCatalogCache() { - if (LOG.isDebugEnabled()) { - for (Counter counter : counters) { - LOG.debug(counter.dump()); - counter.clear(); - } - statsCache.dumpCounters(); - } - tableCache.flush(); - sdCache.flush(); - partCache.flush(); - flushRoleCache(); - } - - private void flushRoleCache() { - roleCache.clear(); - entireRoleTableInCache = false; - } - - /********************************************************************************************** - * General access methods - *********************************************************************************************/ - - private void store(String table, byte[] key, byte[] colFam, byte[] colName, byte[] obj) - throws IOException { - HTableInterface htab = conn.getHBaseTable(table); - Put p = new Put(key); - p.add(colFam, colName, obj); - htab.put(p); - conn.flush(htab); - } - - private void store(String table, byte[] key, byte[] colFam, byte[][] colName, byte[][] obj) - throws IOException { - HTableInterface htab = conn.getHBaseTable(table); - Put p = new Put(key); - for (int i = 0; i < colName.length; i++) { - p.add(colFam, colName[i], obj[i]); - } - htab.put(p); - conn.flush(htab); - } - - private byte[] read(String table, byte[] key, byte[] colFam, byte[] colName) throws IOException { - HTableInterface htab = conn.getHBaseTable(table); - Get g = new Get(key); - g.addColumn(colFam, colName); - Result res = htab.get(g); - return res.getValue(colFam, colName); - } - - private void multiRead(String table, byte[] colFam, byte[] colName, - byte[][] keys, ByteBuffer[] resultDest) throws IOException { - assert keys.length == resultDest.length; - @SuppressWarnings("deprecation") - HTableInterface htab = conn.getHBaseTable(table); - List gets = new ArrayList<>(keys.length); - for (byte[] key : keys) { - Get g = new Get(key); - g.addColumn(colFam, colName); - gets.add(g); - } - Result[] results = htab.get(gets); - for (int i = 0; i < results.length; ++i) { - Result r = results[i]; - if (r.isEmpty()) { - resultDest[i] = null; - } else { - Cell cell = r.getColumnLatestCell(colFam, colName); - resultDest[i] = ByteBuffer.wrap( - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); - } - } - } - - private void multiModify(String table, byte[][] keys, byte[] colFam, - byte[] colName, List values) throws IOException, InterruptedException { - assert values == null || keys.length == values.size(); - // HBase APIs are weird. To supply bytebuffer value, you have to also have bytebuffer - // column name, but not column family. So there. Perhaps we should add these to constants too. - ByteBuffer colNameBuf = ByteBuffer.wrap(colName); - @SuppressWarnings("deprecation") - HTableInterface htab = conn.getHBaseTable(table); - List actions = new ArrayList<>(keys.length); - for (int i = 0; i < keys.length; ++i) { - ByteBuffer value = (values != null) ? values.get(i) : null; - if (value == null) { - actions.add(new Delete(keys[i])); - } else { - Put p = new Put(keys[i]); - p.addColumn(colFam, colNameBuf, HConstants.LATEST_TIMESTAMP, value); - actions.add(p); - } - } - Object[] results = new Object[keys.length]; - htab.batch(actions, results); - // TODO: should we check results array? we don't care about partial results - conn.flush(htab); - } - - private Result read(String table, byte[] key, byte[] colFam, byte[][] colNames) - throws IOException { - HTableInterface htab = conn.getHBaseTable(table); - Get g = new Get(key); - for (byte[] colName : colNames) g.addColumn(colFam, colName); - return htab.get(g); - } - - // Delete a row. 
If colFam and colName are not null, then only the named column will be - // deleted. If colName is null and colFam is not, only the named family will be deleted. If - // both are null the entire row will be deleted. - private void delete(String table, byte[] key, byte[] colFam, byte[] colName) throws IOException { - HTableInterface htab = conn.getHBaseTable(table); - Delete d = new Delete(key); - if (colName != null) d.deleteColumn(colFam, colName); - else if (colFam != null) d.deleteFamily(colFam); - htab.delete(d); - } - - private Iterator scan(String table, byte[] colFam, byte[] colName) throws IOException { - return scan(table, null, null, colFam, colName, null); - } - - private Iterator scan(String table, byte[] colFam, byte[] colName, - Filter filter) throws IOException { - return scan(table, null, null, colFam, colName, filter); - } - - private Iterator scan(String table, Filter filter) throws IOException { - return scan(table, null, null, null, null, filter); - } - - private Iterator scan(String table, byte[] keyStart, byte[] keyEnd, byte[] colFam, - byte[] colName, Filter filter) throws IOException { - HTableInterface htab = conn.getHBaseTable(table); - Scan s = new Scan(); - if (keyStart != null) { - s.setStartRow(keyStart); - } - if (keyEnd != null) { - s.setStopRow(keyEnd); - } - if (colFam != null && colName != null) { - s.addColumn(colFam, colName); - } - if (filter != null) { - s.setFilter(filter); - } - ResultScanner scanner = htab.getScanner(s); - return scanner.iterator(); - } - - /********************************************************************************************** - * Printing methods - *********************************************************************************************/ - private String noSuch(String name, String type) { - return "No such " + type + ": " + name.replaceAll(HBaseUtils.KEY_SEPARATOR_STR, "."); - } - - private List noMatch(String regex, String type) { - return Arrays.asList("No matching " + type + ": " + regex); - } - - private String dumpThriftObject(TBase obj) throws TException, UnsupportedEncodingException { - TMemoryBuffer buf = new TMemoryBuffer(1000); - TProtocol protocol = new TSimpleJSONProtocol(buf); - obj.write(protocol); - return buf.toString("UTF-8"); - } - - /********************************************************************************************** - * Testing methods and classes - *********************************************************************************************/ - @VisibleForTesting - int countStorageDescriptor() throws IOException { - ResultScanner scanner = conn.getHBaseTable(SD_TABLE).getScanner(new Scan()); - int cnt = 0; - Result r; - do { - r = scanner.next(); - if (r != null) { - LOG.debug("Saw record with hash " + Base64.encodeBase64String(r.getRow())); - cnt++; - } - } while (r != null); - - return cnt; - } - - /** - * Use this for unit testing only, so that a mock connection object can be passed in. 
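To tie the access helpers above together: every catalog read and write funnels through these thin wrappers over the HBase client Put/Get/Scan/Delete calls, with writes flushed through the shared connection. A small usage sketch (serializedTable is a placeholder value, not an identifier from this file):

    // Sketch: single-cell round trip through the helpers above.
    byte[] key = HBaseUtils.buildKey(dbName, tableName);
    store(TABLE_TABLE, key, CATALOG_CF, CATALOG_COL, serializedTable);    // Put, then flush
    byte[] back = read(TABLE_TABLE, key, CATALOG_CF, CATALOG_COL);        // Get of one cell
    Iterator<Result> rows = scan(TABLE_TABLE, CATALOG_CF, CATALOG_COL);   // full-table scan
    delete(TABLE_TABLE, key, CATALOG_CF, CATALOG_COL);                    // remove just that cell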
- * @param connection Mock connection objecct - */ - @VisibleForTesting - static void setTestConnection(HBaseConnection connection) { - testConn = connection; - } - - - // For testing without the cache - private static class BogusObjectCache extends ObjectCache { - static Counter bogus = new Counter("bogus"); - - BogusObjectCache() { - super(1, bogus, bogus, bogus); - } - - @Override - V get(K key) { - return null; - } - } - - private static class BogusPartitionCache extends PartitionCache { - static Counter bogus = new Counter("bogus"); - - BogusPartitionCache() { - super(1, bogus, bogus, bogus); - } - - @Override - Collection getAllForTable(String dbName, String tableName) { - return null; - } - - @Override - Partition get(String dbName, String tableName, List partVals) { - return null; - } - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseSchemaTool.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseSchemaTool.java deleted file mode 100644 index b4f8734..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseSchemaTool.java +++ /dev/null @@ -1,192 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.GnuParser; -import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.cli.OptionBuilder; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.apache.commons.codec.binary.Base64; -import org.apache.hadoop.conf.Configuration; - -import java.io.PrintStream; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * A tool to dump contents from the HBase store in a human readable form - */ -public class HBaseSchemaTool { - - public static void main(String[] args) { - Options options = new Options(); - - options.addOption(OptionBuilder - .withLongOpt("help") - .withDescription("You're looking at it") - .create('h')); - - options.addOption(OptionBuilder - .withLongOpt("install") - .withDescription("Install the schema onto an HBase cluster.") - .create('i')); - - options.addOption(OptionBuilder - .withLongOpt("key") - .withDescription("Key to scan with. 
This should be an exact key (not a regular expression") - .hasArg() - .create('k')); - - options.addOption(OptionBuilder - .withLongOpt("list-tables") - .withDescription("List tables in HBase metastore") - .create('l')); - - options.addOption(OptionBuilder - .withLongOpt("regex-key") - .withDescription("Regular expression to scan keys with.") - .hasArg() - .create('r')); - - options.addOption(OptionBuilder - .withLongOpt("table") - .withDescription("HBase metastore table to scan") - .hasArg() - .create('t')); - - CommandLine cli = null; - try { - cli = new GnuParser().parse(options, args); - } catch (ParseException e) { - System.err.println("Parse Exception: " + e.getMessage()); - usage(options); - return; - } - - if (cli.hasOption('h')) { - usage(options); - return; - } - - Configuration conf = new Configuration(); - - if (cli.hasOption('i')) { - new HBaseSchemaTool().install(conf, System.err); - return; - } - - String key = null; - if (cli.hasOption('k')) key = cli.getOptionValue('k'); - String regex = null; - if (cli.hasOption('r')) regex = cli.getOptionValue('r'); - if (key != null && regex != null) { - usage(options); - return; - } - if (key == null && regex == null) regex = ".*"; - - // I do this in the object rather than in the static main so that it's easier to test. - new HBaseSchemaTool().go(cli.hasOption('l'), cli.getOptionValue('t'), key, regex, conf, - System.out, System.err); - } - - private static void usage(Options options) { - HelpFormatter formatter = new HelpFormatter(); - String header = "This tool dumps contents of your hbase metastore. You need to specify\n" + - "the table to dump. You can optionally specify a regular expression on the key for\n" + - "the table. Keep in mind that the key is often a compound. For partitions regular\n" + - "expressions are not used because non-string values are\nstored in binary. Instead for " + - "partition you can specify as much of the exact prefix as you want. So you can give " + - "dbname.tablename or dbname.tablename.pval1..."; - String footer = "If neither key or regex is provided a regex of .* will be assumed. You\n" + - "cannot set both key and regex."; - formatter.printHelp("hbaseschematool", header, options, footer); - return; - } - - @VisibleForTesting void go(boolean listTables, String table, String key, String regex, - Configuration conf, PrintStream out, PrintStream err) { - List lines = new ArrayList<>(); - if (listTables) { - lines = Arrays.asList(HBaseReadWrite.tableNames); - } else { - // If they've used '.' 
as a key separator we need to replace it with the separator used by - // HBaseUtils - if (key != null) key = key.replace('.', HBaseUtils.KEY_SEPARATOR); - try { - HBaseReadWrite.setConf(conf); - HBaseReadWrite hrw = HBaseReadWrite.getInstance(); - if (table.equalsIgnoreCase(HBaseReadWrite.DB_TABLE)) { - if (key != null) lines.add(hrw.printDatabase(key)); - else lines.addAll(hrw.printDatabases(regex)); - } else if (table.equalsIgnoreCase(HBaseReadWrite.FUNC_TABLE)) { - if (key != null) lines.add(hrw.printFunction(key)); - else lines.addAll(hrw.printFunctions(regex)); - } else if (table.equalsIgnoreCase(HBaseReadWrite.GLOBAL_PRIVS_TABLE)) { - // Ignore whatever they passed, there's always only either one or zero global privileges - lines.add(hrw.printGlobalPrivs()); - } else if (table.equalsIgnoreCase(HBaseReadWrite.PART_TABLE)) { - if (key != null) lines.add(hrw.printPartition(key)); - else lines.addAll(hrw.printPartitions(regex)); - } else if (table.equalsIgnoreCase(HBaseReadWrite.USER_TO_ROLE_TABLE)) { - if (key != null) lines.add(hrw.printRolesForUser(key)); - else lines.addAll(hrw.printRolesForUsers(regex)); - } else if (table.equalsIgnoreCase(HBaseReadWrite.ROLE_TABLE)) { - if (key != null) lines.add(hrw.printRole(key)); - else lines.addAll(hrw.printRoles(regex)); - } else if (table.equalsIgnoreCase(HBaseReadWrite.TABLE_TABLE)) { - if (key != null) lines.add(hrw.printTable(key)); - else lines.addAll(hrw.printTables(regex)); - } else if (table.equalsIgnoreCase(HBaseReadWrite.SD_TABLE)) { - if (key != null) lines.add(hrw.printStorageDescriptor(Base64.decodeBase64(key))); - else lines.addAll(hrw.printStorageDescriptors()); - } else if (table.equalsIgnoreCase(HBaseReadWrite.SECURITY_TABLE)) { - // We always print all of security, we don't worry about finding particular entries. - lines.addAll(hrw.printSecurity()); - } else if (table.equalsIgnoreCase(HBaseReadWrite.SEQUENCES_TABLE)) { - // We always print all of sequences, we don't worry about finding particular entries. - lines.addAll(hrw.printSequences()); - } else { - err.println("Unknown table: " + table); - return; - } - } catch (Exception e) { - err.println("Caught exception " + e.getClass() + " with message: " + e.getMessage()); - return; - } - } - for (String line : lines) out.println(line); - } - - @VisibleForTesting void install(Configuration conf, PrintStream err) { - try { - // We need to set the conf because createTablesIfNotExist will get a thread local version - // which requires that the configuration object be set. - HBaseReadWrite.setConf(conf); - HBaseReadWrite.createTablesIfNotExist(); - } catch (Exception e) { - err.println("Caught exception " + e.getClass() + " with message: " + e.getMessage()); - return; - } - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java deleted file mode 100644 index d01f814..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java +++ /dev/null @@ -1,2969 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.CacheLoader; - -import org.apache.commons.lang.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.FileMetadataHandler; -import org.apache.hadoop.hive.metastore.HiveMetaStore; -import org.apache.hadoop.hive.metastore.PartFilterExprUtil; -import org.apache.hadoop.hive.metastore.PartitionExpressionProxy; -import org.apache.hadoop.hive.metastore.RawStore; -import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.InvalidInputException; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PartitionEventType; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableMeta; -import org.apache.hadoop.hive.metastore.api.Type; -import org.apache.hadoop.hive.metastore.api.UnknownDBException; -import 
org.apache.hadoop.hive.metastore.api.UnknownPartitionException; -import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult; -import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan; -import org.apache.hadoop.hive.metastore.parser.ExpressionTree; -import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hive.common.util.HiveStringUtils; -import org.apache.thrift.TException; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; - -/** - * Implementation of RawStore that stores data in HBase - */ -public class HBaseStore implements RawStore { - static final private Logger LOG = LoggerFactory.getLogger(HBaseStore.class.getName()); - - // Do not access this directly, call getHBase to make sure it is initialized. - private HBaseReadWrite hbase = null; - private Configuration conf; - private int txnNestLevel = 0; - private PartitionExpressionProxy expressionProxy = null; - private Map fmHandlers; - - public HBaseStore() { - } - - @Override - public void shutdown() { - try { - if (txnNestLevel != 0) rollbackTransaction(); - getHBase().close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public boolean openTransaction() { - if (txnNestLevel++ <= 0) { - LOG.debug("Opening HBase transaction"); - getHBase().begin(); - txnNestLevel = 1; - } - return true; - } - - @Override - public boolean commitTransaction() { - if (--txnNestLevel == 0) { - LOG.debug("Committing HBase transaction"); - getHBase().commit(); - } - return true; - } - - @Override - public boolean isActiveTransaction() { - return txnNestLevel != 0; - } - - @Override - public void rollbackTransaction() { - txnNestLevel = 0; - LOG.debug("Rolling back HBase transaction"); - getHBase().rollback(); - } - - @Override - public void createDatabase(Database db) throws InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - Database dbCopy = db.deepCopy(); - dbCopy.setName(HiveStringUtils.normalizeIdentifier(dbCopy.getName())); - // HiveMetaStore already checks for existence of the database, don't recheck - getHBase().putDb(dbCopy); - commit = true; - } catch (IOException e) { - LOG.error("Unable to create database ", e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - - } - - @Override - public Database getDatabase(String name) throws NoSuchObjectException { - boolean commit = false; - openTransaction(); - try { - Database db = getHBase().getDb(HiveStringUtils.normalizeIdentifier(name)); - if (db == null) { - throw new NoSuchObjectException("Unable to find db " + name); - } - commit = true; - return db; - } catch (IOException e) { - LOG.error("Unable to get db", e); - throw new NoSuchObjectException("Error reading db " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - 
getHBase().deleteDb(HiveStringUtils.normalizeIdentifier(dbname)); - commit = true; - return true; - } catch (IOException e) { - LOG.error("Unable to delete db" + e); - throw new MetaException("Unable to drop database " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, - MetaException { - // ObjectStore fetches the old db before updating it, but I can't see the possible value of - // that since the caller will have needed to call getDatabase to have the db object. - boolean commit = false; - openTransaction(); - try { - Database dbCopy = db.deepCopy(); - dbCopy.setName(HiveStringUtils.normalizeIdentifier(dbCopy.getName())); - getHBase().putDb(dbCopy); - commit = true; - return true; - } catch (IOException e) { - LOG.error("Unable to alter database ", e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getDatabases(String pattern) throws MetaException { - boolean commit = false; - openTransaction(); - try { - List dbs = getHBase().scanDatabases( - pattern==null?null:HiveStringUtils.normalizeIdentifier(likeToRegex(pattern))); - List dbNames = new ArrayList(dbs.size()); - for (Database db : dbs) dbNames.add(db.getName()); - commit = true; - return dbNames; - } catch (IOException e) { - LOG.error("Unable to get databases ", e); - throw new MetaException("Unable to get databases, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getAllDatabases() throws MetaException { - return getDatabases(null); - } - - @Override - public int getDatabaseCount() throws MetaException { - try { - return getHBase().getDatabaseCount(); - } catch (IOException e) { - LOG.error("Unable to get database count", e); - throw new MetaException("Error scanning databases"); - } - } - - @Override - public boolean createType(Type type) { - throw new UnsupportedOperationException(); - } - - @Override - public Type getType(String typeName) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean dropType(String typeName) { - throw new UnsupportedOperationException(); - } - - @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - // HiveMetaStore above us checks if the table already exists, so we can blindly store it here. 
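Almost every method in HBaseStore repeats the skeleton visible in the database and table operations above: open a (possibly nested) transaction, do the HBase work, set a success flag, and let the finally block either commit or roll back. A condensed sketch of that recurring shape (commitOrRoleBack is the helper name used in this class):

    // Sketch of the pattern used throughout this class.
    boolean commit = false;
    openTransaction();
    try {
      // normalize identifiers, then call getHBase().putXxx(...) or getHBase().getXxx(...)
      commit = true;
    } catch (IOException e) {
      LOG.error("Unable to ...", e);
      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
    } finally {
      commitOrRoleBack(commit);   // commits only when the flag was set, otherwise rolls back
    }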
- try { - Table tblCopy = tbl.deepCopy(); - tblCopy.setDbName(HiveStringUtils.normalizeIdentifier(tblCopy.getDbName())); - tblCopy.setTableName(HiveStringUtils.normalizeIdentifier(tblCopy.getTableName())); - normalizeColumnNames(tblCopy); - getHBase().putTable(tblCopy); - commit = true; - } catch (IOException e) { - LOG.error("Unable to create table ", e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - private void normalizeColumnNames(Table tbl) { - if (tbl.getSd().getCols() != null) { - tbl.getSd().setCols(normalizeFieldSchemaList(tbl.getSd().getCols())); - } - if (tbl.getPartitionKeys() != null) { - tbl.setPartitionKeys(normalizeFieldSchemaList(tbl.getPartitionKeys())); - } - } - - private List normalizeFieldSchemaList(List fieldschemas) { - List ret = new ArrayList<>(); - for (FieldSchema fieldSchema : fieldschemas) { - ret.add(new FieldSchema(fieldSchema.getName().toLowerCase(), fieldSchema.getType(), - fieldSchema.getComment())); - } - return ret; - } - - @Override - public boolean dropTable(String dbName, String tableName) throws MetaException, - NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean commit = false; - openTransaction(); - try { - getHBase().deleteTable(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); - commit = true; - return true; - } catch (IOException e) { - LOG.error("Unable to delete db" + e); - throw new MetaException("Unable to drop table " + tableNameForErrorMsg(dbName, tableName)); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public Table getTable(String dbName, String tableName) throws MetaException { - boolean commit = false; - openTransaction(); - try { - Table table = getHBase().getTable(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); - if (table == null) { - LOG.debug("Unable to find table " + tableNameForErrorMsg(dbName, tableName)); - } - commit = true; - return table; - } catch (IOException e) { - LOG.error("Unable to get table", e); - throw new MetaException("Error reading table " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - Partition partCopy = part.deepCopy(); - partCopy.setDbName(HiveStringUtils.normalizeIdentifier(part.getDbName())); - partCopy.setTableName(HiveStringUtils.normalizeIdentifier(part.getTableName())); - getHBase().putPartition(partCopy); - commit = true; - return true; - } catch (IOException e) { - LOG.error("Unable to add partition", e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean addPartitions(String dbName, String tblName, List parts) - throws InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - List partsCopy = new ArrayList(); - for (int i=0;i part_vals) throws - MetaException, NoSuchObjectException { - boolean commit = false; - openTransaction(); - try { - Partition part = getHBase().getPartition(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), part_vals); - if (part == null) { - throw new NoSuchObjectException("Unable to find partition " + - partNameForErrorMsg(dbName, tableName, part_vals)); - } - 
commit = true; - return part; - } catch (IOException e) { - LOG.error("Unable to get partition", e); - throw new MetaException("Error reading partition " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean doesPartitionExist(String dbName, String tableName, List part_vals) throws - MetaException, NoSuchObjectException { - boolean commit = false; - openTransaction(); - try { - boolean exists = getHBase().getPartition(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), part_vals) != null; - commit = true; - return exists; - } catch (IOException e) { - LOG.error("Unable to get partition", e); - throw new MetaException("Error reading partition " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean dropPartition(String dbName, String tableName, List part_vals) throws - MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean commit = false; - openTransaction(); - try { - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - getHBase().deletePartition(dbName, tableName, HBaseUtils.getPartitionKeyTypes( - getTable(dbName, tableName).getPartitionKeys()), part_vals); - // Drop any cached stats that reference this partitions - getHBase().getStatsCache().invalidate(dbName, tableName, - buildExternalPartName(dbName, tableName, part_vals)); - commit = true; - return true; - } catch (IOException e) { - LOG.error("Unable to delete db" + e); - throw new MetaException("Unable to drop partition " + partNameForErrorMsg(dbName, tableName, - part_vals)); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getPartitions(String dbName, String tableName, int max) throws - MetaException, NoSuchObjectException { - boolean commit = false; - openTransaction(); - try { - List parts = getHBase().scanPartitionsInTable(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), max); - commit = true; - return parts; - } catch (IOException e) { - LOG.error("Unable to get partitions", e); - throw new MetaException("Error scanning partitions"); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void alterTable(String dbName, String tableName, Table newTable) throws InvalidObjectException, - MetaException { - boolean commit = false; - openTransaction(); - try { - Table newTableCopy = newTable.deepCopy(); - newTableCopy.setDbName(HiveStringUtils.normalizeIdentifier(newTableCopy.getDbName())); - List oldPartTypes = getTable(dbName, tableName).getPartitionKeys()==null? - null:HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()); - newTableCopy.setTableName(HiveStringUtils.normalizeIdentifier(newTableCopy.getTableName())); - getHBase().replaceTable(getHBase().getTable(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)), newTableCopy); - if (newTable.getPartitionKeys() != null && newTable.getPartitionKeys().size() > 0 - && !tableName.equals(newTable.getTableName())) { - // They renamed the table, so we need to change each partition as well, since it changes - // the key. 
- try { - List oldParts = getPartitions(dbName, tableName, -1); - List newParts = new ArrayList<>(oldParts.size()); - for (Partition oldPart : oldParts) { - Partition newPart = oldPart.deepCopy(); - newPart.setTableName(newTable.getTableName()); - newParts.add(newPart); - } - getHBase().replacePartitions(oldParts, newParts, oldPartTypes); - } catch (NoSuchObjectException e) { - LOG.debug("No partitions found for old table so not worrying about it"); - } - - } - commit = true; - } catch (IOException e) { - LOG.error("Unable to alter table " + tableNameForErrorMsg(dbName, tableName), e); - throw new MetaException("Unable to alter table " + tableNameForErrorMsg(dbName, tableName)); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getTables(String dbName, String pattern) throws MetaException { - boolean commit = false; - openTransaction(); - try { - List tableNames = getTableNamesInTx(dbName, pattern); - commit = true; - return tableNames; - } catch (IOException e) { - LOG.error("Unable to get tables ", e); - throw new MetaException("Unable to get tables, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getTables(String dbName, String pattern, TableType tableType) throws MetaException { - throw new UnsupportedOperationException(); - } - - private List getTableNamesInTx(String dbName, String pattern) throws IOException { - List
tables = getHBase().scanTables(HiveStringUtils.normalizeIdentifier(dbName), - pattern==null?null:HiveStringUtils.normalizeIdentifier(likeToRegex(pattern))); - List tableNames = new ArrayList(tables.size()); - for (Table table : tables) tableNames.add(table.getTableName()); - return tableNames; - } - - @Override - public List getTableMeta(String dbNames, String tableNames, List tableTypes) - throws MetaException { - boolean commit = false; - openTransaction(); - try { - List metas = new ArrayList<>(); - for (String dbName : getDatabases(dbNames)) { - for (Table table : getTableObjectsByName(dbName, getTableNamesInTx(dbName, tableNames))) { - if (tableTypes == null || tableTypes.contains(table.getTableType())) { - TableMeta metaData = new TableMeta( - table.getDbName(), table.getTableName(), table.getTableType()); - metaData.setComments(table.getParameters().get("comment")); - metas.add(metaData); - } - } - } - commit = true; - return metas; - } catch (Exception e) { - LOG.error("Unable to get tables ", e); - throw new MetaException("Unable to get tables, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List
getTableObjectsByName(String dbname, List tableNames) throws - MetaException, UnknownDBException { - boolean commit = false; - openTransaction(); - try { - List normalizedTableNames = new ArrayList(tableNames.size()); - for (String tableName : tableNames) { - normalizedTableNames.add(HiveStringUtils.normalizeIdentifier(tableName)); - } - List
tables = getHBase().getTables(HiveStringUtils.normalizeIdentifier(dbname), - normalizedTableNames); - commit = true; - return tables; - } catch (IOException e) { - LOG.error("Unable to get tables ", e); - throw new MetaException("Unable to get tables, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getAllTables(String dbName) throws MetaException { - return getTables(dbName, null); - } - - @Override - public int getTableCount() throws MetaException { - try { - return getHBase().getTableCount(); - } catch (IOException e) { - LOG.error("Unable to get table count", e); - throw new MetaException("Error scanning tables"); - } - } - - @Override - public List listTableNamesByFilter(String dbName, String filter, short max_tables) throws - MetaException, UnknownDBException { - // TODO needs to wait until we support pushing filters into HBase. - throw new UnsupportedOperationException(); - } - - @Override - public List listPartitionNames(String db_name, String tbl_name, short max_parts) throws - MetaException { - boolean commit = false; - openTransaction(); - try { - List parts = getHBase().scanPartitionsInTable(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), max_parts); - if (parts == null) return null; - List names = new ArrayList(parts.size()); - Table table = getHBase().getTable(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name)); - for (Partition p : parts) { - names.add(buildExternalPartName(table, p)); - } - commit = true; - return names; - } catch (IOException e) { - LOG.error("Unable to get partitions", e); - throw new MetaException("Error scanning partitions"); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listPartitionNamesByFilter(String db_name, String tbl_name, String filter, - short max_parts) throws MetaException { - // TODO needs to wait until we support pushing filters into HBase. 
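listPartitionNames above maps each scanned Partition back to its external name, the usual key=value/key=value form derived from the table's partition columns; a hedged sketch of what that buildExternalPartName step presumably amounts to, using the standard Warehouse helper as an assumption:

    // Sketch only: Warehouse.makePartName(partCols, vals) builds names like ds=2016-01-01/hr=12.
    String partName = Warehouse.makePartName(table.getPartitionKeys(), p.getValues());
    names.add(partName);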
- throw new UnsupportedOperationException(); - } - - @Override - public void alterPartition(String db_name, String tbl_name, List part_vals, - Partition new_part) throws InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - Partition new_partCopy = new_part.deepCopy(); - new_partCopy.setDbName(HiveStringUtils.normalizeIdentifier(new_partCopy.getDbName())); - new_partCopy.setTableName(HiveStringUtils.normalizeIdentifier(new_partCopy.getTableName())); - Partition oldPart = getHBase().getPartition(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), part_vals); - getHBase().replacePartition(oldPart, new_partCopy, HBaseUtils.getPartitionKeyTypes( - getTable(db_name, tbl_name).getPartitionKeys())); - // Drop any cached stats that reference this partitions - getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), - buildExternalPartName(db_name, tbl_name, part_vals)); - commit = true; - } catch (IOException e) { - LOG.error("Unable to add partition", e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void alterPartitions(String db_name, String tbl_name, List> part_vals_list, - List new_parts) throws InvalidObjectException, - MetaException { - boolean commit = false; - openTransaction(); - try { - List new_partsCopy = new ArrayList(); - for (int i=0;i oldParts = getHBase().getPartitions(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), - HBaseUtils.getPartitionKeyTypes(getTable(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name)).getPartitionKeys()), part_vals_list); - getHBase().replacePartitions(oldParts, new_partsCopy, HBaseUtils.getPartitionKeyTypes( - getTable(db_name, tbl_name).getPartitionKeys())); - for (List part_vals : part_vals_list) { - getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), - buildExternalPartName(db_name, tbl_name, part_vals)); - } - commit = true; - } catch (IOException e) { - LOG.error("Unable to add partition", e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean addIndex(Index index) throws InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - index.setDbName(HiveStringUtils.normalizeIdentifier(index.getDbName())); - index.setOrigTableName(HiveStringUtils.normalizeIdentifier(index.getOrigTableName())); - index.setIndexName(HiveStringUtils.normalizeIdentifier(index.getIndexName())); - index.setIndexTableName(HiveStringUtils.normalizeIdentifier(index.getIndexTableName())); - getHBase().putIndex(index); - commit = true; - } catch (IOException e) { - LOG.error("Unable to create index ", e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - return commit; - } - - @Override - public Index getIndex(String dbName, String origTableName, String indexName) throws - MetaException { - boolean commit = false; - openTransaction(); - try { - Index index = getHBase().getIndex(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(origTableName), - 
HiveStringUtils.normalizeIdentifier(indexName)); - if (index == null) { - LOG.debug("Unable to find index " + indexNameForErrorMsg(dbName, origTableName, indexName)); - } - commit = true; - return index; - } catch (IOException e) { - LOG.error("Unable to get index", e); - throw new MetaException("Error reading index " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean dropIndex(String dbName, String origTableName, String indexName) throws - MetaException { - boolean commit = false; - openTransaction(); - try { - getHBase().deleteIndex(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(origTableName), - HiveStringUtils.normalizeIdentifier(indexName)); - commit = true; - return true; - } catch (IOException e) { - LOG.error("Unable to delete index" + e); - throw new MetaException("Unable to drop index " - + indexNameForErrorMsg(dbName, origTableName, indexName)); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getIndexes(String dbName, String origTableName, int max) throws MetaException { - boolean commit = false; - openTransaction(); - try { - List indexes = getHBase().scanIndexes(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(origTableName), max); - commit = true; - return indexes; - } catch (IOException e) { - LOG.error("Unable to get indexes", e); - throw new MetaException("Error scanning indexxes"); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listIndexNames(String dbName, String origTableName, short max) throws - MetaException { - boolean commit = false; - openTransaction(); - try { - List indexes = getHBase().scanIndexes(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(origTableName), max); - if (indexes == null) return null; - List names = new ArrayList(indexes.size()); - for (Index index : indexes) { - names.add(index.getIndexName()); - } - commit = true; - return names; - } catch (IOException e) { - LOG.error("Unable to get indexes", e); - throw new MetaException("Error scanning indexes"); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void alterIndex(String dbname, String baseTblName, String name, Index newIndex) throws - InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - Index newIndexCopy = newIndex.deepCopy(); - newIndexCopy.setDbName(HiveStringUtils.normalizeIdentifier(newIndexCopy.getDbName())); - newIndexCopy.setOrigTableName( - HiveStringUtils.normalizeIdentifier(newIndexCopy.getOrigTableName())); - newIndexCopy.setIndexName(HiveStringUtils.normalizeIdentifier(newIndexCopy.getIndexName())); - getHBase().replaceIndex(getHBase().getIndex(HiveStringUtils.normalizeIdentifier(dbname), - HiveStringUtils.normalizeIdentifier(baseTblName), - HiveStringUtils.normalizeIdentifier(name)), newIndexCopy); - commit = true; - } catch (IOException e) { - LOG.error("Unable to alter index " + indexNameForErrorMsg(dbname, baseTblName, name), e); - throw new MetaException("Unable to alter index " - + indexNameForErrorMsg(dbname, baseTblName, name)); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getPartitionsByFilter(String dbName, String tblName, String filter, - short maxParts) throws MetaException, - NoSuchObjectException { - final ExpressionTree exprTree = (filter != null && !filter.isEmpty()) ? 
PartFilterExprUtil - .getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; - List result = new ArrayList(); - boolean commit = false; - openTransaction(); - try { - getPartitionsByExprInternal(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), exprTree, maxParts, result); - return result; - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, - String defaultPartitionName, short maxParts, - List result) throws TException { - final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tblName = HiveStringUtils.normalizeIdentifier(tblName); - Table table = getTable(dbName, tblName); - boolean commit = false; - openTransaction(); - try { - if (exprTree == null) { - List partNames = new LinkedList(); - boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn( - table, expr, defaultPartitionName, maxParts, partNames); - result.addAll(getPartitionsByNames(dbName, tblName, partNames)); - return hasUnknownPartitions; - } else { - return getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, result); - } - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public int getNumPartitionsByFilter(String dbName, String tblName, String filter) - throws MetaException, NoSuchObjectException { - final ExpressionTree exprTree = (filter != null && !filter.isEmpty()) ? PartFilterExprUtil - .getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; - List result = new ArrayList(); - boolean commit = false; - openTransaction(); - try { - return getPartitionsByFilter(dbName, tblName, filter, Short.MAX_VALUE).size(); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) - throws MetaException, NoSuchObjectException { - final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr); - List result = new ArrayList(); - boolean commit = false; - openTransaction(); - try { - getPartitionsByExprInternal(dbName, tblName, exprTree, Short.MAX_VALUE, result); - return result.size(); - } finally { - commitOrRoleBack(commit); - } - } - - /** - * Gets the partition names from a table, pruned using an expression. - * @param table Table. - * @param expr Expression. - * @param defaultPartName Default partition name from job config, if any. - * @param maxParts Maximum number of partition names to return. - * @param result The resulting names. - * @return Whether the result contains any unknown partitions. 
- * @throws NoSuchObjectException - */ - private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, - String defaultPartName, short maxParts, List result) throws MetaException, NoSuchObjectException { - List parts = getPartitions( - table.getDbName(), table.getTableName(), maxParts); - for (Partition part : parts) { - result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues())); - } - List columnNames = new ArrayList(); - List typeInfos = new ArrayList(); - for (FieldSchema fs : table.getPartitionKeys()) { - columnNames.add(fs.getName()); - typeInfos.add(TypeInfoFactory.getPrimitiveTypeInfo(fs.getType())); - } - if (defaultPartName == null || defaultPartName.isEmpty()) { - defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); - } - return expressionProxy.filterPartitionsByExpr( - columnNames, typeInfos, expr, defaultPartName, result); - } - - private boolean getPartitionsByExprInternal(String dbName, String tblName, - ExpressionTree exprTree, short maxParts, List result) throws MetaException, - NoSuchObjectException { - - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tblName = HiveStringUtils.normalizeIdentifier(tblName); - Table table = getTable(dbName, tblName); - if (table == null) { - throw new NoSuchObjectException("Unable to find table " + dbName + "." + tblName); - } - // general hbase filter plan from expression tree - PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, table.getPartitionKeys()); - if (LOG.isDebugEnabled()) { - LOG.debug("Hbase Filter Plan generated : " + planRes.plan); - } - - // results from scans need to be merged as there can be overlapping results between - // the scans. Use a map of list of partition values to partition for this. 
- Map, Partition> mergedParts = new HashMap, Partition>(); - for (ScanPlan splan : planRes.plan.getPlans()) { - try { - List parts = getHBase().scanPartitions(dbName, tblName, - splan.getStartRowSuffix(dbName, tblName, table.getPartitionKeys()), - splan.getEndRowSuffix(dbName, tblName, table.getPartitionKeys()), - splan.getFilter(table.getPartitionKeys()), -1); - boolean reachedMax = false; - for (Partition part : parts) { - mergedParts.put(part.getValues(), part); - if (mergedParts.size() == maxParts) { - reachedMax = true; - break; - } - } - if (reachedMax) { - break; - } - } catch (IOException e) { - LOG.error("Unable to get partitions", e); - throw new MetaException("Error scanning partitions" + tableNameForErrorMsg(dbName, tblName) - + ": " + e); - } - } - for (Entry, Partition> mp : mergedParts.entrySet()) { - result.add(mp.getValue()); - } - if (LOG.isDebugEnabled()) { - LOG.debug("Matched partitions " + result); - } - - // return true if there might be some additional partitions that don't match filter conditions - // being returned - return !planRes.hasUnsupportedCondition; - } - - @Override - public List getPartitionsByNames(String dbName, String tblName, - List partNames) throws MetaException, - NoSuchObjectException { - List parts = new ArrayList(); - for (String partName : partNames) { - parts.add(getPartition(dbName, tblName, partNameToVals(partName))); - } - return parts; - } - - @Override - public Table markPartitionForEvent(String dbName, String tblName, Map partVals, - PartitionEventType evtType) throws MetaException, - UnknownTableException, InvalidPartitionException, UnknownPartitionException { - throw new UnsupportedOperationException(); - } - - @Override - public boolean isPartitionMarkedForEvent(String dbName, String tblName, - Map partName, - PartitionEventType evtType) throws MetaException, - UnknownTableException, InvalidPartitionException, UnknownPartitionException { - throw new UnsupportedOperationException(); - } - - /* - * The design for roles. Roles are a pain because of their hierarchical nature. When a user - * comes in and we need to be able to determine all roles he is a part of, we do not want to - * have to walk the hierarchy in the database. This means we need to flatten the role map for - * each user. But we also have to track how the roles are connected for each user, in case one - * role is revoked from another (e.g. if role1 is included in role2 but then revoked - * from it and user1 was granted both role2 and role1 we cannot remove user1 from role1 - * because he was granted that separately). - * - * We want to optimize for the read case and put the cost on grant and revoke of roles, since - * we assume that is the less common case. So we lay out the roles data as follows: - * - * There is a ROLES table that records each role, plus what other principals have been granted - * into it, along with the info on grantor, etc. - * - * There is a USER_TO_ROLES table that contains the mapping of each user to every role he is a - * part of. - * - * This makes determining what roles a user participates in very quick, as USER_TO_ROLE is a - * simple list for each user. It makes granting users into roles expensive, and granting roles - * into roles very expensive. Each time a user is granted into a role, we need to walk the - * hierarchy in the role table (which means moving through that table multiple times) to - * determine every role the user participates in. 
Each a role is granted into another role - * this hierarchical walk must be done for every principal in the role being granted into. To - * mitigate this pain somewhat whenever doing these mappings we cache the entire ROLES table in - * memory since we assume it is not large. - * - * On a related note, whenever a role is dropped we must walk not only all these role tables - * above (equivalent to a role being revoked from another role, since we have to rebuilding - * mappings for any users in roles that contained that role and any users directly in that - * role), but we also have to remove all the privileges associated with that role directly. - * That means a walk of the DBS table and of the TBLS table. - */ - - @Override - public int getPartitionCount() throws MetaException { - try { - return getHBase().getPartitionCount(); - } catch (IOException e) { - LOG.error("Unable to get partition count", e); - throw new MetaException("Error scanning partitions"); - } - } - - @Override - public boolean addRole(String roleName, String ownerName) throws InvalidObjectException, - MetaException, NoSuchObjectException { - int now = (int)(System.currentTimeMillis()/1000); - Role role = new Role(roleName, now, ownerName); - boolean commit = false; - openTransaction(); - try { - if (getHBase().getRole(roleName) != null) { - throw new InvalidObjectException("Role " + roleName + " already exists"); - } - getHBase().putRole(role); - commit = true; - return true; - } catch (IOException e) { - LOG.error("Unable to create role ", e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException { - boolean commit = false; - openTransaction(); - try { - Set usersInRole = getHBase().findAllUsersInRole(roleName); - getHBase().deleteRole(roleName); - getHBase().removeRoleGrants(roleName); - for (String user : usersInRole) { - getHBase().buildRoleMapForUser(user); - } - commit = true; - return true; - } catch (IOException e) { - LOG.error("Unable to delete role" + e); - throw new MetaException("Unable to drop role " + roleName); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor, - PrincipalType grantorType, boolean grantOption) - throws MetaException, NoSuchObjectException, InvalidObjectException { - boolean commit = false; - openTransaction(); - try { - Set usersToRemap = findUsersToRemapRolesFor(role, userName, principalType); - HbaseMetastoreProto.RoleGrantInfo.Builder builder = - HbaseMetastoreProto.RoleGrantInfo.newBuilder(); - if (userName != null) builder.setPrincipalName(userName); - if (principalType != null) { - builder.setPrincipalType(HBaseUtils.convertPrincipalTypes(principalType)); - } - builder.setAddTime((int)(System.currentTimeMillis() / 1000)); - if (grantor != null) builder.setGrantor(grantor); - if (grantorType != null) { - builder.setGrantorType(HBaseUtils.convertPrincipalTypes(grantorType)); - } - builder.setGrantOption(grantOption); - - getHBase().addPrincipalToRole(role.getRoleName(), builder.build()); - for (String user : usersToRemap) { - getHBase().buildRoleMapForUser(user); - } - commit = true; - return true; - } catch (IOException e) { - LOG.error("Unable to grant role", e); - throw new MetaException("Unable to grant role " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - 
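Aside, not part of the patch or of the deleted HBaseStore.java: the design comment above describes a flattened role layout — direct grants per role plus a precomputed per-user role closure that must be rebuilt whenever a grant or revoke changes the hierarchy. The sketch below is a minimal, hypothetical illustration of that layout; every class, field, and method name in it is invented for the example.

import java.util.*;

// Hypothetical sketch of the flattened-role design described in the comment above.
// "directMembers" plays the part of the ROLES table (who is granted directly into a
// role); "userToRoles" plays the part of USER_TO_ROLES (the precomputed transitive
// closure per user). Reads are a single lookup; grants pay for the hierarchy walk.
class FlattenedRoleMapSketch {
  private final Map<String, Set<String>> directMembers = new HashMap<>();
  private final Map<String, Set<String>> userToRoles = new HashMap<>();

  // Read path: what the layout optimizes for -- one lookup, no hierarchy walk.
  Set<String> rolesForUser(String user) {
    return userToRoles.getOrDefault(user, Collections.emptySet());
  }

  // Write path: record the direct grant, then rebuild the user's flattened map.
  void grantUserIntoRole(String user, String role) {
    directMembers.computeIfAbsent(role, r -> new HashSet<>()).add(user);
    rebuildForUser(user);
  }

  // Walk "role granted into role" edges upward to collect every role that
  // transitively includes this user, similar in spirit to buildRoleMapForUser.
  private void rebuildForUser(String user) {
    Set<String> flattened = new HashSet<>();
    Deque<String> toVisit = new ArrayDeque<>();
    for (Map.Entry<String, Set<String>> e : directMembers.entrySet()) {
      if (e.getValue().contains(user)) toVisit.add(e.getKey());
    }
    while (!toVisit.isEmpty()) {
      String role = toVisit.poll();
      if (!flattened.add(role)) continue;            // already visited
      for (Map.Entry<String, Set<String>> e : directMembers.entrySet()) {
        if (e.getValue().contains(role)) toVisit.add(e.getKey());
      }
    }
    userToRoles.put(user, flattened);
  }
}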
@Override - public boolean revokeRole(Role role, String userName, PrincipalType principalType, - boolean grantOption) throws MetaException, NoSuchObjectException { - boolean commit = false; - openTransaction(); - // This can have a couple of different meanings. If grantOption is true, then this is only - // revoking the grant option, the role itself doesn't need to be removed. If it is false - // then we need to remove the userName from the role altogether. - try { - if (grantOption) { - // If this is a grant only change, we don't need to rebuild the user mappings. - getHBase().dropPrincipalFromRole(role.getRoleName(), userName, principalType, grantOption); - } else { - Set usersToRemap = findUsersToRemapRolesFor(role, userName, principalType); - getHBase().dropPrincipalFromRole(role.getRoleName(), userName, principalType, grantOption); - for (String user : usersToRemap) { - getHBase().buildRoleMapForUser(user); - } - } - commit = true; - return true; - } catch (IOException e) { - LOG.error("Unable to revoke role " + role.getRoleName() + " from " + userName, e); - throw new MetaException("Unable to revoke role " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List groupNames) - throws InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); - PrincipalPrivilegeSet global = getHBase().getGlobalPrivs(); - if (global == null) return null; - List pgi; - if (global.getUserPrivileges() != null) { - pgi = global.getUserPrivileges().get(userName); - if (pgi != null) { - pps.putToUserPrivileges(userName, pgi); - } - } - - if (global.getRolePrivileges() != null) { - List roles = getHBase().getUserRoles(userName); - if (roles != null) { - for (String role : roles) { - pgi = global.getRolePrivileges().get(role); - if (pgi != null) { - pps.putToRolePrivileges(role, pgi); - } - } - } - } - commit = true; - return pps; - } catch (IOException e) { - LOG.error("Unable to get db privileges for user", e); - throw new MetaException("Unable to get db privileges for user, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, - List groupNames) - throws InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); - Database db = getHBase().getDb(dbName); - if (db.getPrivileges() != null) { - List pgi; - // Find the user privileges for this db - if (db.getPrivileges().getUserPrivileges() != null) { - pgi = db.getPrivileges().getUserPrivileges().get(userName); - if (pgi != null) { - pps.putToUserPrivileges(userName, pgi); - } - } - - if (db.getPrivileges().getRolePrivileges() != null) { - List roles = getHBase().getUserRoles(userName); - if (roles != null) { - for (String role : roles) { - pgi = db.getPrivileges().getRolePrivileges().get(role); - if (pgi != null) { - pps.putToRolePrivileges(role, pgi); - } - } - } - } - } - commit = true; - return pps; - } catch (IOException e) { - LOG.error("Unable to get db privileges for user", e); - throw new MetaException("Unable to get db privileges for user, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName, - String userName, List groupNames) - throws 
InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); - Table table = getHBase().getTable(dbName, tableName); - List pgi; - if (table.getPrivileges() != null) { - if (table.getPrivileges().getUserPrivileges() != null) { - pgi = table.getPrivileges().getUserPrivileges().get(userName); - if (pgi != null) { - pps.putToUserPrivileges(userName, pgi); - } - } - - if (table.getPrivileges().getRolePrivileges() != null) { - List roles = getHBase().getUserRoles(userName); - if (roles != null) { - for (String role : roles) { - pgi = table.getPrivileges().getRolePrivileges().get(role); - if (pgi != null) { - pps.putToRolePrivileges(role, pgi); - } - } - } - } - } - commit = true; - return pps; - } catch (IOException e) { - LOG.error("Unable to get db privileges for user", e); - throw new MetaException("Unable to get db privileges for user, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName, - String partition, String userName, - List groupNames) throws - InvalidObjectException, MetaException { - // We don't support partition privileges - return null; - } - - @Override - public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName, - String partitionName, String columnName, - String userName, - List groupNames) throws - InvalidObjectException, MetaException { - // We don't support column level privileges - return null; - } - - @Override - public List listPrincipalGlobalGrants(String principalName, - PrincipalType principalType) { - List grants; - List privileges = new ArrayList(); - boolean commit = false; - openTransaction(); - try { - PrincipalPrivilegeSet pps = getHBase().getGlobalPrivs(); - if (pps == null) return privileges; - Map> map; - switch (principalType) { - case USER: - map = pps.getUserPrivileges(); - break; - - case ROLE: - map = pps.getRolePrivileges(); - break; - - default: - throw new RuntimeException("Unknown or unsupported principal type " + - principalType.toString()); - } - if (map == null) return privileges; - grants = map.get(principalName); - - if (grants == null || grants.size() == 0) return privileges; - for (PrivilegeGrantInfo pgi : grants) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, - null, null, null), principalName, principalType, pgi)); - } - commit = true; - return privileges; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listPrincipalDBGrants(String principalName, - PrincipalType principalType, - String dbName) { - List grants; - List privileges = new ArrayList(); - boolean commit = false; - openTransaction(); - try { - Database db = getHBase().getDb(dbName); - if (db == null) return privileges; - PrincipalPrivilegeSet pps = db.getPrivileges(); - if (pps == null) return privileges; - Map> map; - switch (principalType) { - case USER: - map = pps.getUserPrivileges(); - break; - - case ROLE: - map = pps.getRolePrivileges(); - break; - - default: - throw new RuntimeException("Unknown or unsupported principal type " + - principalType.toString()); - } - if (map == null) return privileges; - grants = map.get(principalName); - - if (grants == null || grants.size() == 0) return privileges; - for (PrivilegeGrantInfo pgi : grants) { - privileges.add(new HiveObjectPrivilege(new 
HiveObjectRef(HiveObjectType.DATABASE, dbName, - null, null, null), principalName, principalType, pgi)); - } - commit = true; - return privileges; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listAllTableGrants(String principalName, - PrincipalType principalType, - String dbName, - String tableName) { - List grants; - List privileges = new ArrayList(); - boolean commit = false; - openTransaction(); - try { - Table table = getHBase().getTable(dbName, tableName); - if (table == null) return privileges; - PrincipalPrivilegeSet pps = table.getPrivileges(); - if (pps == null) return privileges; - Map> map; - switch (principalType) { - case USER: - map = pps.getUserPrivileges(); - break; - - case ROLE: - map = pps.getRolePrivileges(); - break; - - default: - throw new RuntimeException("Unknown or unsupported principal type " + - principalType.toString()); - } - if (map == null) return privileges; - grants = map.get(principalName); - - if (grants == null || grants.size() == 0) return privileges; - for (PrivilegeGrantInfo pgi : grants) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, - tableName, null, null), principalName, principalType, pgi)); - } - commit = true; - return privileges; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listPrincipalPartitionGrants(String principalName, - PrincipalType principalType, - String dbName, - String tableName, - List partValues, - String partName) { - // We don't support partition grants - return new ArrayList(); - } - - @Override - public List listPrincipalTableColumnGrants(String principalName, - PrincipalType principalType, - String dbName, String tableName, - String columnName) { - // We don't support column grants - return new ArrayList(); - } - - @Override - public List listPrincipalPartitionColumnGrants(String principalName, - PrincipalType principalType, - String dbName, - String tableName, - List partVals, - String partName, - String columnName) { - // We don't support column grants - return new ArrayList(); - } - - @Override - public boolean grantPrivileges(PrivilegeBag privileges) - throws InvalidObjectException, MetaException, NoSuchObjectException { - boolean commit = false; - openTransaction(); - try { - for (HiveObjectPrivilege priv : privileges.getPrivileges()) { - // Locate the right object to deal with - PrivilegeInfo privilegeInfo = findPrivilegeToGrantOrRevoke(priv); - - // Now, let's see if we've already got this privilege - for (PrivilegeGrantInfo info : privilegeInfo.grants) { - if (info.getPrivilege().equals(priv.getGrantInfo().getPrivilege())) { - throw new InvalidObjectException(priv.getPrincipalName() + " already has " + - priv.getGrantInfo().getPrivilege() + " on " + privilegeInfo.typeErrMsg); - } - } - privilegeInfo.grants.add(priv.getGrantInfo()); - - writeBackGrantOrRevoke(priv, privilegeInfo); - } - commit = true; - return true; - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) throws - InvalidObjectException, MetaException, NoSuchObjectException { - boolean commit = false; - openTransaction(); - try { - for (HiveObjectPrivilege priv : privileges.getPrivileges()) { - PrivilegeInfo privilegeInfo = findPrivilegeToGrantOrRevoke(priv); - - for (int i = 0; i < privilegeInfo.grants.size(); i++) { - if 
(privilegeInfo.grants.get(i).getPrivilege().equals( - priv.getGrantInfo().getPrivilege())) { - if (grantOption) privilegeInfo.grants.get(i).setGrantOption(false); - else privilegeInfo.grants.remove(i); - break; - } - } - writeBackGrantOrRevoke(priv, privilegeInfo); - } - commit = true; - return true; - } finally { - commitOrRoleBack(commit); - } - } - - private static class PrivilegeInfo { - Database db; - Table table; - List grants; - String typeErrMsg; - PrincipalPrivilegeSet privSet; - } - - private PrivilegeInfo findPrivilegeToGrantOrRevoke(HiveObjectPrivilege privilege) - throws MetaException, NoSuchObjectException, InvalidObjectException { - PrivilegeInfo result = new PrivilegeInfo(); - switch (privilege.getHiveObject().getObjectType()) { - case GLOBAL: - try { - result.privSet = createOnNull(getHBase().getGlobalPrivs()); - } catch (IOException e) { - LOG.error("Unable to fetch global privileges", e); - throw new MetaException("Unable to fetch global privileges, " + e.getMessage()); - } - result.typeErrMsg = "global"; - break; - - case DATABASE: - result.db = getDatabase(privilege.getHiveObject().getDbName()); - result.typeErrMsg = "database " + result.db.getName(); - result.privSet = createOnNull(result.db.getPrivileges()); - break; - - case TABLE: - result.table = getTable(privilege.getHiveObject().getDbName(), - privilege.getHiveObject().getObjectName()); - result.typeErrMsg = "table " + result.table.getTableName(); - result.privSet = createOnNull(result.table.getPrivileges()); - break; - - case PARTITION: - case COLUMN: - throw new RuntimeException("HBase metastore does not support partition or column " + - "permissions"); - - default: - throw new RuntimeException("Woah bad, unknown object type " + - privilege.getHiveObject().getObjectType()); - } - - // Locate the right PrivilegeGrantInfo - Map> grantInfos; - switch (privilege.getPrincipalType()) { - case USER: - grantInfos = result.privSet.getUserPrivileges(); - result.typeErrMsg = "user"; - break; - - case GROUP: - throw new RuntimeException("HBase metastore does not support group permissions"); - - case ROLE: - grantInfos = result.privSet.getRolePrivileges(); - result.typeErrMsg = "role"; - break; - - default: - throw new RuntimeException("Woah bad, unknown principal type " + - privilege.getPrincipalType()); - } - - // Find the requested name in the grantInfo - result.grants = grantInfos.get(privilege.getPrincipalName()); - if (result.grants == null) { - // Means we don't have any grants for this user yet. - result.grants = new ArrayList(); - grantInfos.put(privilege.getPrincipalName(), result.grants); - } - return result; - } - - private PrincipalPrivilegeSet createOnNull(PrincipalPrivilegeSet pps) { - // If this is the first time a user has been granted a privilege set will be null. 
- if (pps == null) { - pps = new PrincipalPrivilegeSet(); - } - if (pps.getUserPrivileges() == null) { - pps.setUserPrivileges(new HashMap>()); - } - if (pps.getRolePrivileges() == null) { - pps.setRolePrivileges(new HashMap>()); - } - return pps; - } - - private void writeBackGrantOrRevoke(HiveObjectPrivilege priv, PrivilegeInfo pi) - throws MetaException, NoSuchObjectException, InvalidObjectException { - // Now write it back - switch (priv.getHiveObject().getObjectType()) { - case GLOBAL: - try { - getHBase().putGlobalPrivs(pi.privSet); - } catch (IOException e) { - LOG.error("Unable to write global privileges", e); - throw new MetaException("Unable to write global privileges, " + e.getMessage()); - } - break; - - case DATABASE: - pi.db.setPrivileges(pi.privSet); - alterDatabase(pi.db.getName(), pi.db); - break; - - case TABLE: - pi.table.setPrivileges(pi.privSet); - alterTable(pi.table.getDbName(), pi.table.getTableName(), pi.table); - break; - - default: - throw new RuntimeException("Dude, you missed the second switch!"); - } - } - - @Override - public Role getRole(String roleName) throws NoSuchObjectException { - boolean commit = false; - openTransaction(); - try { - Role role = getHBase().getRole(roleName); - if (role == null) { - throw new NoSuchObjectException("Unable to find role " + roleName); - } - commit = true; - return role; - } catch (IOException e) { - LOG.error("Unable to get role", e); - throw new NoSuchObjectException("Error reading table " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listRoleNames() { - boolean commit = false; - openTransaction(); - try { - List roles = getHBase().scanRoles(); - List roleNames = new ArrayList(roles.size()); - for (Role role : roles) roleNames.add(role.getRoleName()); - commit = true; - return roleNames; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listRoles(String principalName, PrincipalType principalType) { - List roles = new ArrayList(); - boolean commit = false; - openTransaction(); - try { - try { - roles.addAll(getHBase().getPrincipalDirectRoles(principalName, principalType)); - } catch (IOException e) { - throw new RuntimeException(e); - } - // Add the public role if this is a user - if (principalType == PrincipalType.USER) { - roles.add(new Role(HiveMetaStore.PUBLIC, 0, null)); - } - commit = true; - return roles; - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listRolesWithGrants(String principalName, - PrincipalType principalType) { - boolean commit = false; - openTransaction(); - try { - List roles = listRoles(principalName, principalType); - List rpgs = new ArrayList(roles.size()); - for (Role role : roles) { - HbaseMetastoreProto.RoleGrantInfoList grants = getHBase().getRolePrincipals(role.getRoleName()); - if (grants != null) { - for (HbaseMetastoreProto.RoleGrantInfo grant : grants.getGrantInfoList()) { - if (grant.getPrincipalType() == HBaseUtils.convertPrincipalTypes(principalType) && - grant.getPrincipalName().equals(principalName)) { - rpgs.add(new RolePrincipalGrant(role.getRoleName(), principalName, principalType, - grant.getGrantOption(), (int) grant.getAddTime(), grant.getGrantor(), - HBaseUtils.convertPrincipalTypes(grant.getGrantorType()))); - } - } - } - } - commit = true; - return rpgs; - } catch (Exception e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List 
listRoleMembers(String roleName) { - boolean commit = false; - openTransaction(); - try { - HbaseMetastoreProto.RoleGrantInfoList gil = getHBase().getRolePrincipals(roleName); - List roleMaps = new ArrayList(gil.getGrantInfoList().size()); - for (HbaseMetastoreProto.RoleGrantInfo giw : gil.getGrantInfoList()) { - roleMaps.add(new RolePrincipalGrant(roleName, giw.getPrincipalName(), - HBaseUtils.convertPrincipalTypes(giw.getPrincipalType()), - giw.getGrantOption(), (int)giw.getAddTime(), giw.getGrantor(), - HBaseUtils.convertPrincipalTypes(giw.getGrantorType()))); - } - commit = true; - return roleMaps; - } catch (Exception e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public Partition getPartitionWithAuth(String dbName, String tblName, List partVals, - String user_name, List group_names) - throws MetaException, NoSuchObjectException, InvalidObjectException { - // We don't do authorization checks for partitions. - return getPartition(dbName, tblName, partVals); - } - - @Override - public List getPartitionsWithAuth(String dbName, String tblName, short maxParts, - String userName, List groupNames) - throws MetaException, NoSuchObjectException, InvalidObjectException { - // We don't do authorization checks for partitions. - return getPartitions(dbName, tblName, maxParts); - } - - @Override - public List listPartitionNamesPs(String db_name, String tbl_name, List part_vals, - short max_parts) - throws MetaException, NoSuchObjectException { - List parts = - listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, null, null); - List partNames = new ArrayList(parts.size()); - for (Partition part : parts) { - partNames.add(buildExternalPartName(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), part.getValues())); - } - return partNames; - } - - - @Override - public List listPartitionsPsWithAuth(String db_name, String tbl_name, - List part_vals, short max_parts, - String userName, List groupNames) - throws MetaException, NoSuchObjectException { - // We don't handle auth info with partitions - boolean commit = false; - openTransaction(); - try { - List parts = getHBase().scanPartitions(HiveStringUtils.normalizeIdentifier(db_name), - HiveStringUtils.normalizeIdentifier(tbl_name), part_vals, max_parts); - commit = true; - return parts; - } catch (IOException e) { - LOG.error("Unable to list partition names", e); - throw new MetaException("Failed to list part names, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean updateTableColumnStatistics(ColumnStatistics colStats) throws - NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean commit = false; - openTransaction(); - try { - //update table properties - List statsObjs = colStats.getStatsObj(); - List colNames = new ArrayList<>(); - for (ColumnStatisticsObj statsObj:statsObjs) { - colNames.add(statsObj.getColName()); - } - String dbName = colStats.getStatsDesc().getDbName(); - String tableName = colStats.getStatsDesc().getTableName(); - Table newTable = getTable(dbName, tableName); - Table newTableCopy = newTable.deepCopy(); - StatsSetupConst.setColumnStatsState(newTableCopy.getParameters(), colNames); - getHBase().replaceTable(newTable, newTableCopy); - - getHBase().updateStatistics(colStats.getStatsDesc().getDbName(), - colStats.getStatsDesc().getTableName(), null, colStats); - - commit = true; - return true; - } catch (IOException e) { - 
LOG.error("Unable to update column statistics", e); - throw new MetaException("Failed to update column statistics, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, - List partVals) throws - NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean commit = false; - openTransaction(); - try { - // update partition properties - String db_name = colStats.getStatsDesc().getDbName(); - String tbl_name = colStats.getStatsDesc().getTableName(); - Partition oldPart = getHBase().getPartition(db_name, tbl_name, partVals); - Partition new_partCopy = oldPart.deepCopy(); - List colNames = new ArrayList<>(); - List statsObjs = colStats.getStatsObj(); - for (ColumnStatisticsObj statsObj : statsObjs) { - colNames.add(statsObj.getColName()); - } - StatsSetupConst.setColumnStatsState(new_partCopy.getParameters(), colNames); - getHBase().replacePartition(oldPart, new_partCopy, - HBaseUtils.getPartitionKeyTypes(getTable(db_name, tbl_name).getPartitionKeys())); - - getHBase().updateStatistics(colStats.getStatsDesc().getDbName(), - colStats.getStatsDesc().getTableName(), partVals, colStats); - // We need to invalidate aggregates that include this partition - getHBase().getStatsCache().invalidate(colStats.getStatsDesc().getDbName(), - colStats.getStatsDesc().getTableName(), colStats.getStatsDesc().getPartName()); - - commit = true; - return true; - } catch (IOException e) { - LOG.error("Unable to update column statistics", e); - throw new MetaException("Failed to update column statistics, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public ColumnStatistics getTableColumnStatistics(String dbName, String tableName, - List colName) throws MetaException, - NoSuchObjectException { - boolean commit = false; - openTransaction(); - try { - ColumnStatistics cs = getHBase().getTableStatistics(dbName, tableName, colName); - commit = true; - return cs; - } catch (IOException e) { - LOG.error("Unable to fetch column statistics", e); - throw new MetaException("Failed to fetch column statistics, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getPartitionColumnStatistics(String dbName, String tblName, - List partNames, List colNames) throws MetaException, NoSuchObjectException { - List> partVals = new ArrayList>(partNames.size()); - for (String partName : partNames) { - partVals.add(partNameToVals(partName)); - } - boolean commit = false; - openTransaction(); - try { - List cs = - getHBase().getPartitionStatistics(dbName, tblName, partNames, partVals, colNames); - commit = true; - return cs; - } catch (IOException e) { - LOG.error("Unable to fetch column statistics", e); - throw new MetaException("Failed fetching column statistics, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, - List partVals, String colName) throws NoSuchObjectException, MetaException, - InvalidObjectException, InvalidInputException { - // NOP, stats will be deleted along with the partition when it is dropped. 
- return true; - } - - @Override - public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws - NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - // NOP, stats will be deleted along with the table when it is dropped. - return true; - } - - /** - * Return aggregated statistics for each column in the colNames list aggregated over partitions in - * the partNames list - * - */ - @Override - public AggrStats get_aggr_stats_for(String dbName, String tblName, List partNames, - List colNames) throws MetaException, NoSuchObjectException { - List> partVals = new ArrayList>(partNames.size()); - for (String partName : partNames) { - partVals.add(partNameToVals(partName)); - } - boolean commit = false; - boolean hasAnyStats = false; - openTransaction(); - try { - AggrStats aggrStats = new AggrStats(); - aggrStats.setPartsFound(0); - for (String colName : colNames) { - try { - AggrStats oneCol = - getHBase().getStatsCache().get(dbName, tblName, partNames, colName); - if (oneCol.getColStatsSize() > 0) { - assert oneCol.getColStatsSize() == 1; - aggrStats.setPartsFound(oneCol.getPartsFound()); - aggrStats.addToColStats(oneCol.getColStats().get(0)); - hasAnyStats = true; - } - } catch (CacheLoader.InvalidCacheLoadException e) { - LOG.debug("Found no stats for column " + colName); - // This means we have no stats at all for this column for these partitions, so just - // move on. - } - } - commit = true; - if (!hasAnyStats) { - // Set the required field. - aggrStats.setColStats(new ArrayList()); - } - return aggrStats; - } catch (IOException e) { - LOG.error("Unable to fetch aggregate column statistics", e); - throw new MetaException("Failed fetching aggregate column statistics, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public long cleanupEvents() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean addToken(String tokenIdentifier, String delegationToken) { - boolean commit = false; - openTransaction(); - try { - getHBase().putDelegationToken(tokenIdentifier, delegationToken); - commit = true; - return commit; // See HIVE-11302, for now always returning true - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean removeToken(String tokenIdentifier) { - boolean commit = false; - openTransaction(); - try { - getHBase().deleteDelegationToken(tokenIdentifier); - commit = true; - return commit; // See HIVE-11302, for now always returning true - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public String getToken(String tokenIdentifier) { - boolean commit = false; - openTransaction(); - try { - String token = getHBase().getDelegationToken(tokenIdentifier); - commit = true; - return token; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getAllTokenIdentifiers() { - boolean commit = false; - openTransaction(); - try { - List ids = getHBase().scanDelegationTokenIdentifiers(); - commit = true; - return ids; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public int addMasterKey(String key) throws MetaException { - boolean commit = false; - openTransaction(); - try { - long seq = 
getHBase().getNextSequence(HBaseReadWrite.MASTER_KEY_SEQUENCE); - getHBase().putMasterKey((int) seq, key); - commit = true; - return (int)seq; - } catch (IOException e) { - LOG.error("Unable to add master key", e); - throw new MetaException("Failed adding master key, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void updateMasterKey(Integer seqNo, String key) throws NoSuchObjectException, - MetaException { - boolean commit = false; - openTransaction(); - try { - if (getHBase().getMasterKey(seqNo) == null) { - throw new NoSuchObjectException("No key found with keyId: " + seqNo); - } - getHBase().putMasterKey(seqNo, key); - commit = true; - } catch (IOException e) { - LOG.error("Unable to update master key", e); - throw new MetaException("Failed updating master key, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public boolean removeMasterKey(Integer keySeq) { - boolean commit = false; - openTransaction(); - try { - getHBase().deleteMasterKey(keySeq); - commit = true; - return true; // See HIVE-11302, for now always returning true - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public String[] getMasterKeys() { - boolean commit = false; - openTransaction(); - try { - List keys = getHBase().scanMasterKeys(); - commit = true; - return keys.toArray(new String[keys.size()]); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void verifySchema() throws MetaException { - - } - - @Override - public String getMetaStoreSchemaVersion() throws MetaException { - throw new UnsupportedOperationException(); - } - - @Override - public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException { - throw new UnsupportedOperationException(); - } - - @Override - public void dropPartitions(String dbName, String tblName, List partNames) throws - MetaException, NoSuchObjectException { - boolean commit = false; - openTransaction(); - try { - for (String partName : partNames) { - dropPartition(dbName, tblName, partNameToVals(partName)); - } - commit = true; - } catch (Exception e) { - LOG.error("Unable to drop partitions", e); - throw new NoSuchObjectException("Failure dropping partitions, " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listPrincipalDBGrantsAll(String principalName, - PrincipalType principalType) { - List privileges = new ArrayList(); - boolean commit = false; - openTransaction(); - try { - List dbs = getHBase().scanDatabases(null); - for (Database db : dbs) { - List grants; - - PrincipalPrivilegeSet pps = db.getPrivileges(); - if (pps == null) continue; - Map> map; - switch (principalType) { - case USER: - map = pps.getUserPrivileges(); - break; - - case ROLE: - map = pps.getRolePrivileges(); - break; - - default: - throw new RuntimeException("Unknown or unsupported principal type " + - principalType.toString()); - } - - if (map == null) continue; - grants = map.get(principalName); - if (grants == null || grants.size() == 0) continue; - for (PrivilegeGrantInfo pgi : grants) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, - db.getName(), null, null, null), principalName, principalType, pgi)); - } - } - commit = true; - return privileges; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } 
-  }
-
-  @Override
-  public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(String principalName,
-      PrincipalType principalType) {
-    List<HiveObjectPrivilege> privileges = new ArrayList<HiveObjectPrivilege>();
-    boolean commit = false;
-    openTransaction();
-    try {
-      List<Table>
tables = getHBase().scanTables(null, null); - for (Table table : tables) { - List grants; - - PrincipalPrivilegeSet pps = table.getPrivileges(); - if (pps == null) continue; - Map> map; - switch (principalType) { - case USER: - map = pps.getUserPrivileges(); - break; - - case ROLE: - map = pps.getRolePrivileges(); - break; - - default: - throw new RuntimeException("Unknown or unsupported principal type " + - principalType.toString()); - } - - if (map == null) continue; - grants = map.get(principalName); - if (grants == null || grants.size() == 0) continue; - for (PrivilegeGrantInfo pgi : grants) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, - table.getDbName(), table.getTableName(), null, null), principalName, principalType, - pgi)); - } - } - commit = true; - return privileges; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listPrincipalPartitionGrantsAll(String principalName, - PrincipalType principalType) { - return new ArrayList(); - } - - @Override - public List listPrincipalTableColumnGrantsAll(String principalName, - PrincipalType principalType) { - return new ArrayList(); - } - - @Override - public List listPrincipalPartitionColumnGrantsAll(String principalName, - PrincipalType principalType) { - return new ArrayList(); - } - - @Override - public List listGlobalGrantsAll() { - List privileges = new ArrayList(); - boolean commit = false; - openTransaction(); - try { - PrincipalPrivilegeSet pps = getHBase().getGlobalPrivs(); - if (pps != null) { - for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, - null, null, null), e.getKey(), PrincipalType.USER, pgi)); - } - } - for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, - null, null, null), e.getKey(), PrincipalType.ROLE, pgi)); - } - } - } - commit = true; - return privileges; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listDBGrantsAll(String dbName) { - List privileges = new ArrayList(); - boolean commit = false; - openTransaction(); - try { - Database db = getHBase().getDb(dbName); - PrincipalPrivilegeSet pps = db.getPrivileges(); - if (pps != null) { - for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, dbName, - null, null, null), e.getKey(), PrincipalType.USER, pgi)); - } - } - for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, dbName, - null, null, null), e.getKey(), PrincipalType.ROLE, pgi)); - } - } - } - commit = true; - return privileges; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listPartitionColumnGrantsAll(String dbName, String tableName, - String partitionName, - String columnName) { - return new ArrayList(); - } - - @Override - public List listTableGrantsAll(String dbName, String tableName) { - List privileges = new ArrayList(); - boolean commit = false; - 
openTransaction(); - try { - Table table = getHBase().getTable(dbName, tableName); - PrincipalPrivilegeSet pps = table.getPrivileges(); - if (pps != null) { - for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, - tableName, null, null), e.getKey(), PrincipalType.USER, pgi)); - } - } - for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { - for (PrivilegeGrantInfo pgi : e.getValue()) { - privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, - tableName, null, null), e.getKey(), PrincipalType.ROLE, pgi)); - } - } - } - commit = true; - return privileges; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List listPartitionGrantsAll(String dbName, String tableName, - String partitionName) { - return new ArrayList(); - } - - @Override - public List listTableColumnGrantsAll(String dbName, String tableName, - String columnName) { - return new ArrayList(); - } - - @Override - public void createFunction(Function func) throws InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - getHBase().putFunction(func); - commit = true; - } catch (IOException e) { - LOG.error("Unable to create function", e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void alterFunction(String dbName, String funcName, Function newFunction) throws - InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - getHBase().putFunction(newFunction); - commit = true; - } catch (IOException e) { - LOG.error("Unable to alter function ", e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void dropFunction(String dbName, String funcName) throws MetaException, - NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean commit = false; - openTransaction(); - try { - getHBase().deleteFunction(dbName, funcName); - commit = true; - } catch (IOException e) { - LOG.error("Unable to delete function" + e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public Function getFunction(String dbName, String funcName) throws MetaException { - boolean commit = false; - openTransaction(); - try { - Function func = getHBase().getFunction(dbName, funcName); - commit = true; - return func; - } catch (IOException e) { - LOG.error("Unable to get function" + e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getAllFunctions() throws MetaException { - boolean commit = false; - openTransaction(); - try { - List funcs = getHBase().scanFunctions(null, ".*"); - commit = true; - return funcs; - } catch (IOException e) { - LOG.error("Unable to get functions" + e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getFunctions(String dbName, String pattern) throws MetaException { - boolean commit = false; - openTransaction(); - try { - List funcs = 
getHBase().scanFunctions(dbName, likeToRegex(pattern)); - List funcNames = new ArrayList(funcs.size()); - for (Function func : funcs) funcNames.add(func.getFunctionName()); - commit = true; - return funcNames; - } catch (IOException e) { - LOG.error("Unable to get functions" + e); - throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { - throw new UnsupportedOperationException(); - } - - @Override - public void addNotificationEvent(NotificationEvent event) { - throw new UnsupportedOperationException(); - } - - @Override - public void cleanNotificationEvents(int olderThan) { - throw new UnsupportedOperationException(); - } - - @Override - public CurrentNotificationEventId getCurrentNotificationEventId() { - throw new UnsupportedOperationException(); - } - - @Override - public void flushCache() { - getHBase().flushCatalogCache(); - } - - @Override - public void setConf(Configuration configuration) { - // initialize expressionProxy. Also re-initialize it if - // setConf is being called with new configuration object (though that - // is not expected to happen, doing it just for safety) - // TODO: why not re-intialize HBaseReadWrite? - Configuration oldConf = conf; - conf = configuration; - if (expressionProxy != null && conf != oldConf) { - LOG.warn("Unexpected setConf when we were already configured"); - } - if (expressionProxy == null || conf != oldConf) { - expressionProxy = PartFilterExprUtil.createExpressionProxy(conf); - } - if (conf != oldConf) { - fmHandlers = HiveMetaStore.createHandlerMap(); - configureFileMetadataHandlers(fmHandlers.values()); - } - } - - private void configureFileMetadataHandlers(Collection fmHandlers) { - for (FileMetadataHandler fmh : fmHandlers) { - fmh.configure(conf, expressionProxy, getHBase()); - } - } - - @Override - public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { - return fmHandlers.get(type); - } - - @Override - public Configuration getConf() { - return conf; - - } - - private HBaseReadWrite getHBase() { - if (hbase == null) { - HBaseReadWrite.setConf(conf); - hbase = HBaseReadWrite.getInstance(); - } - return hbase; - } - - // This is for building error messages only. It does not look up anything in the metastore. - private String tableNameForErrorMsg(String dbName, String tableName) { - return dbName + "." + tableName; - } - - // This is for building error messages only. It does not look up anything in the metastore as - // they may just throw another error. - private String partNameForErrorMsg(String dbName, String tableName, List partVals) { - return tableNameForErrorMsg(dbName, tableName) + "." + StringUtils.join(partVals, ':'); - } - - // This is for building error messages only. It does not look up anything in the metastore as - // they may just throw another error. - private String indexNameForErrorMsg(String dbName, String origTableName, String indexName) { - return tableNameForErrorMsg(dbName, origTableName) + "." 
+ indexName; - } - - private String buildExternalPartName(Table table, Partition part) { - return buildExternalPartName(table, part.getValues()); - } - - private String buildExternalPartName(String dbName, String tableName, List partVals) - throws MetaException { - return buildExternalPartName(getTable(dbName, tableName), partVals); - } - - private Set findUsersToRemapRolesFor(Role role, String principalName, PrincipalType type) - throws IOException, NoSuchObjectException { - Set usersToRemap; - switch (type) { - case USER: - // In this case it's just the user being added to the role that we need to remap for. - usersToRemap = new HashSet(); - usersToRemap.add(principalName); - break; - - case ROLE: - // In this case we need to remap for all users in the containing role (not the role being - // granted into the containing role). - usersToRemap = getHBase().findAllUsersInRole(role.getRoleName()); - break; - - default: - throw new RuntimeException("Unknown principal type " + type); - - } - return usersToRemap; - } - - /** - * Build a partition name for external use. Necessary since HBase itself doesn't store - * partition names. - * @param table table object - * @param partVals partition values. - * @return - */ - static String buildExternalPartName(Table table, List partVals) { - List partCols = new ArrayList(); - for (FieldSchema pc : table.getPartitionKeys()) partCols.add(pc.getName()); - return FileUtils.makePartName(partCols, partVals); - } - - private static List partNameToVals(String name) { - if (name == null) return null; - List vals = new ArrayList(); - String[] kvp = name.split("/"); - for (String kv : kvp) { - vals.add(FileUtils.unescapePathName(kv.substring(kv.indexOf('=') + 1))); - } - return vals; - } - - static List> partNameListToValsList(List partNames) { - List> valLists = new ArrayList>(partNames.size()); - for (String partName : partNames) { - valLists.add(partNameToVals(partName)); - } - return valLists; - } - - private String likeToRegex(String like) { - if (like == null) return null; - // Convert Hive's strange like syntax to Java regex. Per - // https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-Show - // the supported syntax is that * means Java .* and | means 'or' - // This implementation leaves other regular expression syntax alone, which means people can - // use it, even though it wouldn't work on RDBMS backed metastores. 
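buildExternalPartName and partNameToVals translate between a partition's value list and the familiar "k1=v1/k2=v2" name, since this store does not keep partition names itself. A rough sketch of that round trip, ignoring the path escaping that FileUtils.makePartName and unescapePathName perform in the real code:

import java.util.ArrayList;
import java.util.List;

class PartNames {
  // Join partition columns and values into "col1=val1/col2=val2".
  static String makePartName(List<String> cols, List<String> vals) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < cols.size(); i++) {
      if (i > 0) sb.append('/');
      sb.append(cols.get(i)).append('=').append(vals.get(i));
    }
    return sb.toString();
  }

  // Recover just the values, as partNameToVals does above.
  static List<String> partNameToVals(String name) {
    List<String> vals = new ArrayList<>();
    if (name == null) return vals;
    for (String kv : name.split("/")) {
      vals.add(kv.substring(kv.indexOf('=') + 1));
    }
    return vals;
  }
}

// Example: makePartName(List.of("ds", "hr"), List.of("2024-01-01", "08"))
//          -> "ds=2024-01-01/hr=08", and partNameToVals gives back ["2024-01-01", "08"].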
- return like.replace("*", ".*"); - } - - private void commitOrRoleBack(boolean commit) { - if (commit) { - LOG.debug("Committing transaction"); - commitTransaction(); - } else { - LOG.debug("Rolling back transaction"); - rollbackTransaction(); - } - } - - @VisibleForTesting HBaseReadWrite backdoor() { - return getHBase(); - } - - @Override - public boolean isFileMetadataSupported() { - return true; - } - - @Override - public ByteBuffer[] getFileMetadata(List fileIds) throws MetaException { - openTransaction(); - boolean commit = true; - try { - return getHBase().getFileMetadata(fileIds); - } catch (IOException e) { - commit = false; - LOG.error("Unable to get file metadata", e); - throw new MetaException("Error reading file metadata " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] expr, - ByteBuffer[] metadatas, ByteBuffer[] results, boolean[] eliminated) throws MetaException { - FileMetadataHandler fmh = fmHandlers.get(type); - boolean commit = true; - try { - fmh.getFileMetadataByExpr(fileIds, expr, metadatas, results, eliminated); - } catch (IOException e) { - LOG.error("Unable to get file metadata by expr", e); - commit = false; - throw new MetaException("Error reading file metadata by expr" + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void putFileMetadata(List fileIds, List metadata, - FileMetadataExprType type) throws MetaException { - openTransaction(); - boolean commit = false; - try { - ByteBuffer[][] addedVals = null; - ByteBuffer[] addedCols = null; - if (type != null) { - FileMetadataHandler fmh = fmHandlers.get(type); - addedCols = fmh.createAddedCols(); - if (addedCols != null) { - addedVals = fmh.createAddedColVals(metadata); - } - } - getHBase().storeFileMetadata(fileIds, metadata, addedCols, addedVals); - commit = true; - } catch (IOException | InterruptedException e) { - LOG.error("Unable to store file metadata", e); - throw new MetaException("Error storing file metadata " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getPrimaryKeys(String db_name, String tbl_name) throws MetaException { - db_name = HiveStringUtils.normalizeIdentifier(db_name); - tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name); - boolean commit = false; - openTransaction(); - try { - List pk = getHBase().getPrimaryKey(db_name, tbl_name); - commit = true; - return pk; - } catch (IOException e) { - LOG.error("Unable to get primary key", e); - throw new MetaException("Error reading db " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getForeignKeys(String parent_db_name, String parent_tbl_name, - String foreign_db_name, String foreign_tbl_name) - throws MetaException { - parent_db_name = parent_db_name!=null?HiveStringUtils.normalizeIdentifier(parent_db_name):null; - parent_tbl_name = parent_tbl_name!=null?HiveStringUtils.normalizeIdentifier(parent_tbl_name):null; - foreign_db_name = HiveStringUtils.normalizeIdentifier(foreign_db_name); - foreign_tbl_name = HiveStringUtils.normalizeIdentifier(foreign_tbl_name); - boolean commit = false; - openTransaction(); - try { - List fks = getHBase().getForeignKeys(foreign_db_name, foreign_tbl_name); - if (fks == null || fks.size() == 0) return null; - List result = new ArrayList<>(fks.size()); - for (SQLForeignKey fkcol : fks) { - if ((parent_db_name == null || 
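likeToRegex above maps the SHOW FUNCTIONS pattern syntax onto Java regex by turning '*' into '.*' and leaving '|' (or) and everything else untouched. A small self-contained sketch with an example match, assuming the same single substitution:

class ShowPattern {
  static String likeToRegex(String like) {
    return like == null ? null : like.replace("*", ".*");
  }

  public static void main(String[] args) {
    // "xpath*|str*" matches names starting with xpath or str, as SHOW FUNCTIONS LIKE would.
    String regex = likeToRegex("xpath*|str*");
    System.out.println("xpath_string".matches(regex)); // true
    System.out.println("str_to_map".matches(regex));   // true
    System.out.println("length".matches(regex));       // false
  }
}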
fkcol.getPktable_db().equals(parent_db_name)) && - (parent_tbl_name == null || fkcol.getPktable_name().equals(parent_tbl_name))) { - result.add(fkcol); - } - } - commit = true; - return result; - } catch (IOException e) { - LOG.error("Unable to get foreign key", e); - throw new MetaException("Error reading db " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getUniqueConstraints(String db_name, String tbl_name) - throws MetaException { - db_name = HiveStringUtils.normalizeIdentifier(db_name); - tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name); - boolean commit = false; - openTransaction(); - try { - List uk = getHBase().getUniqueConstraint(db_name, tbl_name); - commit = true; - return uk; - } catch (IOException e) { - LOG.error("Unable to get unique constraint", e); - throw new MetaException("Error reading db " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public List getNotNullConstraints(String db_name, String tbl_name) - throws MetaException { - db_name = HiveStringUtils.normalizeIdentifier(db_name); - tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name); - boolean commit = false; - openTransaction(); - try { - List nn = getHBase().getNotNullConstraint(db_name, tbl_name); - commit = true; - return nn; - } catch (IOException e) { - LOG.error("Unable to get not null constraint", e); - throw new MetaException("Error reading db " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void createTableWithConstraints(Table tbl, List primaryKeys, - List foreignKeys, List uniqueConstraints, - List notNullConstraints) - throws InvalidObjectException, MetaException { - boolean commit = false; - openTransaction(); - try { - createTable(tbl); - if (primaryKeys != null) addPrimaryKeys(primaryKeys); - if (foreignKeys != null) addForeignKeys(foreignKeys); - if (uniqueConstraints != null) addUniqueConstraints(uniqueConstraints); - if (notNullConstraints != null) addNotNullConstraints(notNullConstraints); - commit = true; - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void dropConstraint(String dbName, String tableName, String constraintName) - throws NoSuchObjectException { - // This is something of pain, since we have to search both primary key and foreign key to see - // which they want to drop. - boolean commit = false; - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - constraintName = HiveStringUtils.normalizeIdentifier(constraintName); - openTransaction(); - try { - List pk = getHBase().getPrimaryKey(dbName, tableName); - if (pk != null && pk.size() > 0 && pk.get(0).getPk_name().equals(constraintName)) { - getHBase().deletePrimaryKey(dbName, tableName); - commit = true; - return; - } - - List fks = getHBase().getForeignKeys(dbName, tableName); - if (fks != null && fks.size() > 0) { - List newKeyList = new ArrayList<>(fks.size()); - // Make a new list of keys that excludes all columns from the constraint we're dropping. - for (SQLForeignKey fkcol : fks) { - if (!fkcol.getFk_name().equals(constraintName)) newKeyList.add(fkcol); - } - // If we've dropped only one foreign key out of many keys, than update so that we still - // have the existing keys. Otherwise drop the foreign keys all together. 
- if (newKeyList.size() > 0) getHBase().putForeignKeys(newKeyList); - else getHBase().deleteForeignKeys(dbName, tableName); - commit = true; - return; - } - - List uk = getHBase().getUniqueConstraint(dbName, tableName); - if (uk != null && uk.size() > 0 && uk.get(0).getUk_name().equals(constraintName)) { - getHBase().deleteUniqueConstraint(dbName, tableName); - commit = true; - return; - } - - List nn = getHBase().getNotNullConstraint(dbName, tableName); - if (nn != null && nn.size() > 0 && nn.get(0).getNn_name().equals(constraintName)) { - getHBase().deleteNotNullConstraint(dbName, tableName); - commit = true; - return; - } - - commit = true; - throw new NoSuchObjectException("Unable to find constraint named " + constraintName + - " on table " + tableNameForErrorMsg(dbName, tableName)); - } catch (IOException e) { - LOG.error("Error fetching primary key for table " + tableNameForErrorMsg(dbName, tableName), e); - throw new NoSuchObjectException("Error fetching primary key for table " + - tableNameForErrorMsg(dbName, tableName) + " : " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void addPrimaryKeys(List pks) throws InvalidObjectException, MetaException { - boolean commit = false; - for (SQLPrimaryKey pk : pks) { - pk.setTable_db(HiveStringUtils.normalizeIdentifier(pk.getTable_db())); - pk.setTable_name(HiveStringUtils.normalizeIdentifier(pk.getTable_name())); - pk.setColumn_name(HiveStringUtils.normalizeIdentifier(pk.getColumn_name())); - pk.setPk_name(HiveStringUtils.normalizeIdentifier(pk.getPk_name())); - } - openTransaction(); - try { - List currentPk = - getHBase().getPrimaryKey(pks.get(0).getTable_db(), pks.get(0).getTable_name()); - if (currentPk != null) { - throw new MetaException(" Primary key already exists for: " + - tableNameForErrorMsg(pks.get(0).getTable_db(), pks.get(0).getTable_name())); - } - getHBase().putPrimaryKey(pks); - commit = true; - } catch (IOException e) { - LOG.error("Error writing primary key", e); - throw new MetaException("Error writing primary key: " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void addForeignKeys(List fks) throws InvalidObjectException, MetaException { - boolean commit = false; - for (SQLForeignKey fk : fks) { - fk.setPktable_db(HiveStringUtils.normalizeIdentifier(fk.getPktable_db())); - fk.setPktable_name(HiveStringUtils.normalizeIdentifier(fk.getPktable_name())); - fk.setFktable_db(HiveStringUtils.normalizeIdentifier(fk.getFktable_db())); - fk.setFktable_name(HiveStringUtils.normalizeIdentifier(fk.getFktable_name())); - fk.setFk_name(HiveStringUtils.normalizeIdentifier(fk.getFk_name())); - } - openTransaction(); - try { - // Fetch the existing keys (if any) and add in these new ones - List existing = - getHBase().getForeignKeys(fks.get(0).getFktable_db(), fks.get(0).getFktable_name()); - if (existing == null) existing = new ArrayList<>(fks.size()); - existing.addAll(fks); - getHBase().putForeignKeys(existing); - commit = true; - } catch (IOException e) { - LOG.error("Error writing foreign keys", e); - throw new MetaException("Error writing foreign keys: " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - public void addUniqueConstraints(List uks) throws InvalidObjectException, MetaException { - boolean commit = false; - for (SQLUniqueConstraint uk : uks) { - uk.setTable_db(HiveStringUtils.normalizeIdentifier(uk.getTable_db())); - uk.setTable_name(HiveStringUtils.normalizeIdentifier(uk.getTable_name())); - 
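addForeignKeys above has to merge rather than overwrite: all foreign key columns for a table live under one HBase row, so new constraint columns are appended to whatever is already stored and the whole list is written back. A plain-Java sketch of that read-merge-write step, with a hypothetical in-memory map in place of HBaseReadWrite:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class ForeignKeyMerge {
  // Hypothetical per-table storage keyed by "db.table"; the real code uses one HBase row.
  static final Map<String, List<String>> STORE = new HashMap<>();

  static void addForeignKeys(String dbTable, List<String> newCols) {
    List<String> existing = STORE.get(dbTable);
    if (existing == null) existing = new ArrayList<>(newCols.size());
    existing.addAll(newCols);       // append, keeping constraints added earlier
    STORE.put(dbTable, existing);   // single write of the merged list
  }
}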
uk.setColumn_name(HiveStringUtils.normalizeIdentifier(uk.getColumn_name())); - uk.setUk_name(HiveStringUtils.normalizeIdentifier(uk.getUk_name())); - } - openTransaction(); - try { - getHBase().putUniqueConstraints(uks); - commit = true; - } catch (IOException e) { - LOG.error("Error writing unique constraints", e); - throw new MetaException("Error writing unique constraints: " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public void addNotNullConstraints(List nns) throws InvalidObjectException, MetaException { - boolean commit = false; - for (SQLNotNullConstraint nn : nns) { - nn.setTable_db(HiveStringUtils.normalizeIdentifier(nn.getTable_db())); - nn.setTable_name(HiveStringUtils.normalizeIdentifier(nn.getTable_name())); - nn.setColumn_name(HiveStringUtils.normalizeIdentifier(nn.getColumn_name())); - nn.setNn_name(HiveStringUtils.normalizeIdentifier(nn.getNn_name())); - } - openTransaction(); - try { - getHBase().putNotNullConstraints(nns); - commit = true; - } catch (IOException e) { - LOG.error("Error writing not null constraints", e); - throw new MetaException("Error writing not null constraints: " + e.getMessage()); - } finally { - commitOrRoleBack(commit); - } - } - - @Override - public Map> getColStatsForTablePartitions(String dbName, - String tableName) throws MetaException, NoSuchObjectException { - // TODO: see if it makes sense to implement this here - return null; - } - - @Override - public String getMetastoreDbUuid() throws MetaException { - throw new MetaException("Get metastore DB uuid is not implemented"); - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java deleted file mode 100644 index 6b7eb9e..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java +++ /dev/null @@ -1,1781 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import java.io.IOException; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; -import java.security.MessageDigest; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.SortedMap; -import java.util.SortedSet; -import java.util.TreeMap; -import java.util.TreeSet; - -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.Decimal; -import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.FunctionType; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; -import org.apache.hadoop.hive.metastore.api.ResourceType; -import org.apache.hadoop.hive.metastore.api.ResourceUri; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.ByteStream.Output; -import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe; -import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDeWithEndPrefix; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; -import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; -import org.apache.hadoop.io.BytesWritable; -import 
org.apache.hive.common.util.BloomFilter; -import org.apache.hive.common.util.HiveStringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Lists; -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * Utility functions - */ -public class HBaseUtils { - - final static Charset ENCODING = StandardCharsets.UTF_8; - final static char KEY_SEPARATOR = '\u0001'; - final static String KEY_SEPARATOR_STR = new String(new char[] {KEY_SEPARATOR}); - - static final private Logger LOG = LoggerFactory.getLogger(HBaseUtils.class.getName()); - - /** - * Build a key for an object in hbase - * @param components - * @return - */ - static byte[] buildKey(String... components) { - return buildKey(false, components); - } - - static byte[] buildKeyWithTrailingSeparator(String... components) { - return buildKey(true, components); - } - - private static byte[] buildKey(boolean trailingSeparator, String... components) { - String protoKey = StringUtils.join(components, KEY_SEPARATOR); - if (trailingSeparator) protoKey += KEY_SEPARATOR; - return protoKey.getBytes(ENCODING); - } - - private static HbaseMetastoreProto.Parameters buildParameters(Map params) { - List entries = new ArrayList<>(); - for (Map.Entry e : params.entrySet()) { - entries.add( - HbaseMetastoreProto.ParameterEntry.newBuilder() - .setKey(e.getKey()) - .setValue(e.getValue()) - .build()); - } - return HbaseMetastoreProto.Parameters.newBuilder() - .addAllParameter(entries) - .build(); - } - - private static Map buildParameters(HbaseMetastoreProto.Parameters protoParams) { - Map params = new HashMap<>(); - for (HbaseMetastoreProto.ParameterEntry pe : protoParams.getParameterList()) { - params.put(pe.getKey(), pe.getValue()); - } - return params; - } - - - private static List - buildPrincipalPrivilegeSetEntry(Map> entries) { - List results = new ArrayList<>(); - for (Map.Entry> entry : entries.entrySet()) { - results.add(HbaseMetastoreProto.PrincipalPrivilegeSetEntry.newBuilder() - .setPrincipalName(entry.getKey()) - .addAllPrivileges(buildPrivilegeGrantInfo(entry.getValue())) - .build()); - } - return results; - } - - private static List buildPrivilegeGrantInfo( - List privileges) { - List results = new ArrayList<>(); - for (PrivilegeGrantInfo privilege : privileges) { - HbaseMetastoreProto.PrivilegeGrantInfo.Builder builder = - HbaseMetastoreProto.PrivilegeGrantInfo.newBuilder(); - if (privilege.getPrivilege() != null) builder.setPrivilege(privilege.getPrivilege()); - builder.setCreateTime(privilege.getCreateTime()); - if (privilege.getGrantor() != null) builder.setGrantor(privilege.getGrantor()); - if (privilege.getGrantorType() != null) { - builder.setGrantorType(convertPrincipalTypes(privilege.getGrantorType())); - } - builder.setGrantOption(privilege.isGrantOption()); - results.add(builder.build()); - } - return results; - } - - /** - * Convert Thrift.PrincipalType to HbaseMetastoreProto.principalType - * @param type - * @return - */ - static HbaseMetastoreProto.PrincipalType convertPrincipalTypes(PrincipalType type) { - switch (type) { - case USER: return HbaseMetastoreProto.PrincipalType.USER; - case ROLE: return HbaseMetastoreProto.PrincipalType.ROLE; - default: throw new RuntimeException("Unknown principal type " + type.toString()); - } - } - - /** - * Convert principalType from HbaseMetastoreProto to Thrift.PrincipalType - * @param type - * @return - */ - static PrincipalType convertPrincipalTypes(HbaseMetastoreProto.PrincipalType type) { 
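buildKey above concatenates key components with the control character '\u0001', optionally appending a trailing separator so that prefix scans match only the children of a given prefix. A minimal sketch of the same idea, assuming UTF-8 row keys:

import java.nio.charset.StandardCharsets;

class RowKeys {
  static final char SEP = '\u0001';

  static byte[] buildKey(boolean trailingSeparator, String... components) {
    String key = String.join(String.valueOf(SEP), components);
    if (trailingSeparator) key += SEP;   // scan prefix stops at this component boundary
    return key.getBytes(StandardCharsets.UTF_8);
  }
}

// Example: buildKey(false, "default", "my_func") is shaped like the row key used for
// the function my_func in database default.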
- switch (type) { - case USER: return PrincipalType.USER; - case ROLE: return PrincipalType.ROLE; - default: throw new RuntimeException("Unknown principal type " + type.toString()); - } - } - - private static Map> convertPrincipalPrivilegeSetEntries( - List entries) { - Map> map = new HashMap<>(); - for (HbaseMetastoreProto.PrincipalPrivilegeSetEntry entry : entries) { - map.put(entry.getPrincipalName(), convertPrivilegeGrantInfos(entry.getPrivilegesList())); - } - return map; - } - - private static List convertPrivilegeGrantInfos( - List privileges) { - List results = new ArrayList<>(); - for (HbaseMetastoreProto.PrivilegeGrantInfo proto : privileges) { - PrivilegeGrantInfo pgi = new PrivilegeGrantInfo(); - if (proto.hasPrivilege()) pgi.setPrivilege(proto.getPrivilege()); - pgi.setCreateTime((int)proto.getCreateTime()); - if (proto.hasGrantor()) pgi.setGrantor(proto.getGrantor()); - if (proto.hasGrantorType()) { - pgi.setGrantorType(convertPrincipalTypes(proto.getGrantorType())); - } - if (proto.hasGrantOption()) pgi.setGrantOption(proto.getGrantOption()); - results.add(pgi); - } - return results; - } - - private static HbaseMetastoreProto.PrincipalPrivilegeSet - buildPrincipalPrivilegeSet(PrincipalPrivilegeSet pps) { - HbaseMetastoreProto.PrincipalPrivilegeSet.Builder builder = - HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(); - if (pps.getUserPrivileges() != null) { - builder.addAllUsers(buildPrincipalPrivilegeSetEntry(pps.getUserPrivileges())); - } - if (pps.getRolePrivileges() != null) { - builder.addAllRoles(buildPrincipalPrivilegeSetEntry(pps.getRolePrivileges())); - } - return builder.build(); - } - - private static PrincipalPrivilegeSet buildPrincipalPrivilegeSet( - HbaseMetastoreProto.PrincipalPrivilegeSet proto) throws InvalidProtocolBufferException { - PrincipalPrivilegeSet pps = null; - if (!proto.getUsersList().isEmpty() || !proto.getRolesList().isEmpty()) { - pps = new PrincipalPrivilegeSet(); - if (!proto.getUsersList().isEmpty()) { - pps.setUserPrivileges(convertPrincipalPrivilegeSetEntries(proto.getUsersList())); - } - if (!proto.getRolesList().isEmpty()) { - pps.setRolePrivileges(convertPrincipalPrivilegeSetEntries(proto.getRolesList())); - } - } - return pps; - } - /** - * Serialize a PrincipalPrivilegeSet - * @param pps - * @return - */ - static byte[] serializePrincipalPrivilegeSet(PrincipalPrivilegeSet pps) { - return buildPrincipalPrivilegeSet(pps).toByteArray(); - } - - /** - * Deserialize a PrincipalPrivilegeSet - * @param serialized - * @return - * @throws InvalidProtocolBufferException - */ - static PrincipalPrivilegeSet deserializePrincipalPrivilegeSet(byte[] serialized) - throws InvalidProtocolBufferException { - HbaseMetastoreProto.PrincipalPrivilegeSet proto = - HbaseMetastoreProto.PrincipalPrivilegeSet.parseFrom(serialized); - return buildPrincipalPrivilegeSet(proto); - } - - /** - * Serialize a role - * @param role - * @return two byte arrays, first contains the key, the second the serialized value. - */ - static byte[][] serializeRole(Role role) { - byte[][] result = new byte[2][]; - result[0] = buildKey(role.getRoleName()); - HbaseMetastoreProto.Role.Builder builder = HbaseMetastoreProto.Role.newBuilder(); - builder.setCreateTime(role.getCreateTime()); - if (role.getOwnerName() != null) builder.setOwnerName(role.getOwnerName()); - result[1] = builder.build().toByteArray(); - return result; - } - - /** - * Deserialize a role. This method should be used when the rolename is already known as it - * doesn't have to re-deserialize it. 
- * @param roleName name of the role - * @param value value fetched from hbase - * @return A role - * @throws InvalidProtocolBufferException - */ - static Role deserializeRole(String roleName, byte[] value) - throws InvalidProtocolBufferException { - Role role = new Role(); - role.setRoleName(roleName); - HbaseMetastoreProto.Role protoRole = - HbaseMetastoreProto.Role.parseFrom(value); - role.setCreateTime((int)protoRole.getCreateTime()); - if (protoRole.hasOwnerName()) role.setOwnerName(protoRole.getOwnerName()); - return role; - } - - /** - * Deserialize a role. This method should be used when the rolename is not already known (eg - * when doing a scan). - * @param key key from hbase - * @param value value from hbase - * @return a role - * @throws InvalidProtocolBufferException - */ - static Role deserializeRole(byte[] key, byte[] value) - throws InvalidProtocolBufferException { - String roleName = new String(key, ENCODING); - return deserializeRole(roleName, value); - } - - /** - * Serialize a list of role names - * @param roles - * @return - */ - static byte[] serializeRoleList(List roles) { - return HbaseMetastoreProto.RoleList.newBuilder() - .addAllRole(roles) - .build() - .toByteArray(); - } - - static List deserializeRoleList(byte[] value) throws InvalidProtocolBufferException { - HbaseMetastoreProto.RoleList proto = HbaseMetastoreProto.RoleList.parseFrom(value); - return new ArrayList<>(proto.getRoleList()); - } - - /** - * Serialize a database - * @param db - * @return two byte arrays, first contains the key, the second the serialized value. - */ - static byte[][] serializeDatabase(Database db) { - byte[][] result = new byte[2][]; - result[0] = buildKey(HiveStringUtils.normalizeIdentifier(db.getName())); - HbaseMetastoreProto.Database.Builder builder = HbaseMetastoreProto.Database.newBuilder(); - - if (db.getDescription() != null) builder.setDescription(db.getDescription()); - if (db.getLocationUri() != null) builder.setUri(db.getLocationUri()); - if (db.getParameters() != null) builder.setParameters(buildParameters(db.getParameters())); - if (db.getPrivileges() != null) { - builder.setPrivileges(buildPrincipalPrivilegeSet(db.getPrivileges())); - } - if (db.getOwnerName() != null) builder.setOwnerName(db.getOwnerName()); - if (db.getOwnerType() != null) builder.setOwnerType(convertPrincipalTypes(db.getOwnerType())); - - result[1] = builder.build().toByteArray(); - return result; - } - - - /** - * Deserialize a database. This method should be used when the db anme is already known as it - * doesn't have to re-deserialize it. - * @param dbName name of the role - * @param value value fetched from hbase - * @return A database - * @throws InvalidProtocolBufferException - */ - static Database deserializeDatabase(String dbName, byte[] value) - throws InvalidProtocolBufferException { - Database db = new Database(); - db.setName(dbName); - HbaseMetastoreProto.Database protoDb = HbaseMetastoreProto.Database.parseFrom(value); - if (protoDb.hasDescription()) db.setDescription(protoDb.getDescription()); - if (protoDb.hasUri()) db.setLocationUri(protoDb.getUri()); - if (protoDb.hasParameters()) db.setParameters(buildParameters(protoDb.getParameters())); - if (protoDb.hasPrivileges()) { - db.setPrivileges(buildPrincipalPrivilegeSet(protoDb.getPrivileges())); - } - if (protoDb.hasOwnerName()) db.setOwnerName(protoDb.getOwnerName()); - if (protoDb.hasOwnerType()) db.setOwnerType(convertPrincipalTypes(protoDb.getOwnerType())); - - return db; - } - - /** - * Deserialize a database. 
This method should be used when the db name is not already known (eg - * when doing a scan). - * @param key key from hbase - * @param value value from hbase - * @return a role - * @throws InvalidProtocolBufferException - */ - static Database deserializeDatabase(byte[] key, byte[] value) - throws InvalidProtocolBufferException { - String dbName = new String(key, ENCODING); - return deserializeDatabase(dbName, value); - } - - /** - * Serialize a function - * @param func function to serialize - * @return two byte arrays, first contains the key, the second the value. - */ - static byte[][] serializeFunction(Function func) { - byte[][] result = new byte[2][]; - result[0] = buildKey(func.getDbName(), func.getFunctionName()); - HbaseMetastoreProto.Function.Builder builder = HbaseMetastoreProto.Function.newBuilder(); - if (func.getClassName() != null) builder.setClassName(func.getClassName()); - if (func.getOwnerName() != null) builder.setOwnerName(func.getOwnerName()); - if (func.getOwnerType() != null) { - builder.setOwnerType(convertPrincipalTypes(func.getOwnerType())); - } - builder.setCreateTime(func.getCreateTime()); - if (func.getFunctionType() != null) { - builder.setFunctionType(convertFunctionTypes(func.getFunctionType())); - } - if (func.getResourceUris() != null) { - for (ResourceUri uri : func.getResourceUris()) { - builder.addResourceUris(HbaseMetastoreProto.Function.ResourceUri.newBuilder() - .setResourceType(convertResourceTypes(uri.getResourceType())) - .setUri(uri.getUri())); - } - } - result[1] = builder.build().toByteArray(); - return result; - } - - /** - * Deserialize a function. This method should be used when the function and db name are - * already known. - * @param dbName name of the database the function is in - * @param functionName name of the function - * @param value serialized value of the function - * @return function as an object - * @throws InvalidProtocolBufferException - */ - static Function deserializeFunction(String dbName, String functionName, byte[] value) - throws InvalidProtocolBufferException { - Function func = new Function(); - func.setDbName(dbName); - func.setFunctionName(functionName); - HbaseMetastoreProto.Function protoFunc = HbaseMetastoreProto.Function.parseFrom(value); - if (protoFunc.hasClassName()) func.setClassName(protoFunc.getClassName()); - if (protoFunc.hasOwnerName()) func.setOwnerName(protoFunc.getOwnerName()); - if (protoFunc.hasOwnerType()) { - func.setOwnerType(convertPrincipalTypes(protoFunc.getOwnerType())); - } - func.setCreateTime((int)protoFunc.getCreateTime()); - if (protoFunc.hasFunctionType()) { - func.setFunctionType(convertFunctionTypes(protoFunc.getFunctionType())); - } - for (HbaseMetastoreProto.Function.ResourceUri protoUri : protoFunc.getResourceUrisList()) { - func.addToResourceUris(new ResourceUri(convertResourceTypes(protoUri.getResourceType()), - protoUri.getUri())); - } - return func; - } - - /** - * Deserialize a function. This method should be used when the dbname and function name are - * not already known, such as in a scan. 
- * @param key key from hbase - * @param value value from hbase - * @return function object - * @throws InvalidProtocolBufferException - */ - static Function deserializeFunction(byte[] key, byte[] value) - throws InvalidProtocolBufferException { - String[] keys = deserializeKey(key); - return deserializeFunction(keys[0], keys[1], value); - } - - private static HbaseMetastoreProto.Function.FunctionType convertFunctionTypes(FunctionType type) { - switch (type) { - case JAVA: return HbaseMetastoreProto.Function.FunctionType.JAVA; - default: throw new RuntimeException("Unknown function type " + type.toString()); - } - } - - private static FunctionType convertFunctionTypes(HbaseMetastoreProto.Function.FunctionType type) { - switch (type) { - case JAVA: return FunctionType.JAVA; - default: throw new RuntimeException("Unknown function type " + type.toString()); - } - } - - private static HbaseMetastoreProto.Function.ResourceUri.ResourceType - convertResourceTypes(ResourceType type) { - switch (type) { - case JAR: return HbaseMetastoreProto.Function.ResourceUri.ResourceType.JAR; - case FILE: return HbaseMetastoreProto.Function.ResourceUri.ResourceType.FILE; - case ARCHIVE: return HbaseMetastoreProto.Function.ResourceUri.ResourceType.ARCHIVE; - default: throw new RuntimeException("Unknown resource type " + type.toString()); - } - } - - private static ResourceType convertResourceTypes( - HbaseMetastoreProto.Function.ResourceUri.ResourceType type) { - switch (type) { - case JAR: return ResourceType.JAR; - case FILE: return ResourceType.FILE; - case ARCHIVE: return ResourceType.ARCHIVE; - default: throw new RuntimeException("Unknown resource type " + type.toString()); - } - } - - private static List - convertFieldSchemaListFromProto(List protoList) { - List schemas = new ArrayList<>(protoList.size()); - for (HbaseMetastoreProto.FieldSchema proto : protoList) { - schemas.add(new FieldSchema(proto.getName(), proto.getType(), - proto.hasComment() ? proto.getComment() : null)); - } - return schemas; - } - - private static List - convertFieldSchemaListToProto(List schemas) { - List protoList = new ArrayList<>(schemas.size()); - for (FieldSchema fs : schemas) { - HbaseMetastoreProto.FieldSchema.Builder builder = - HbaseMetastoreProto.FieldSchema.newBuilder(); - builder - .setName(fs.getName()) - .setType(fs.getType()); - if (fs.getComment() != null) builder.setComment(fs.getComment()); - protoList.add(builder.build()); - } - return protoList; - } - - /** - * Serialize a storage descriptor. - * @param sd storage descriptor to serialize - * @return serialized storage descriptor. 
- */ - static byte[] serializeStorageDescriptor(StorageDescriptor sd) { - HbaseMetastoreProto.StorageDescriptor.Builder builder = - HbaseMetastoreProto.StorageDescriptor.newBuilder(); - builder.addAllCols(convertFieldSchemaListToProto(sd.getCols())); - if (sd.getInputFormat() != null) { - builder.setInputFormat(sd.getInputFormat()); - } - if (sd.getOutputFormat() != null) { - builder.setOutputFormat(sd.getOutputFormat()); - } - builder.setIsCompressed(sd.isCompressed()); - builder.setNumBuckets(sd.getNumBuckets()); - if (sd.getSerdeInfo() != null) { - HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder serdeBuilder = - HbaseMetastoreProto.StorageDescriptor.SerDeInfo.newBuilder(); - SerDeInfo serde = sd.getSerdeInfo(); - if (serde.getName() != null) { - serdeBuilder.setName(serde.getName()); - } - if (serde.getSerializationLib() != null) { - serdeBuilder.setSerializationLib(serde.getSerializationLib()); - } - if (serde.getParameters() != null) { - serdeBuilder.setParameters(buildParameters(serde.getParameters())); - } - builder.setSerdeInfo(serdeBuilder); - } - if (sd.getBucketCols() != null) { - builder.addAllBucketCols(sd.getBucketCols()); - } - if (sd.getSortCols() != null) { - List orders = sd.getSortCols(); - List protoList = new ArrayList<>(orders.size()); - for (Order order : orders) { - protoList.add(HbaseMetastoreProto.StorageDescriptor.Order.newBuilder() - .setColumnName(order.getCol()) - .setOrder(order.getOrder()) - .build()); - } - builder.addAllSortCols(protoList); - } - if (sd.getSkewedInfo() != null) { - HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder skewBuilder = - HbaseMetastoreProto.StorageDescriptor.SkewedInfo.newBuilder(); - SkewedInfo skewed = sd.getSkewedInfo(); - if (skewed.getSkewedColNames() != null) { - skewBuilder.addAllSkewedColNames(skewed.getSkewedColNames()); - } - if (skewed.getSkewedColValues() != null) { - for (List innerList : skewed.getSkewedColValues()) { - HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder listBuilder = - HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.newBuilder(); - listBuilder.addAllSkewedColValue(innerList); - skewBuilder.addSkewedColValues(listBuilder); - } - } - if (skewed.getSkewedColValueLocationMaps() != null) { - for (Map.Entry, String> e : skewed.getSkewedColValueLocationMaps().entrySet()) { - HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder mapBuilder = - HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.newBuilder(); - mapBuilder.addAllKey(e.getKey()); - mapBuilder.setValue(e.getValue()); - skewBuilder.addSkewedColValueLocationMaps(mapBuilder); - } - } - builder.setSkewedInfo(skewBuilder); - } - builder.setStoredAsSubDirectories(sd.isStoredAsSubDirectories()); - - return builder.build().toByteArray(); - } - - /** - * Produce a hash for the storage descriptor - * @param sd storage descriptor to hash - * @param md message descriptor to use to generate the hash - * @return the hash as a byte array - */ - public static byte[] hashStorageDescriptor(StorageDescriptor sd, MessageDigest md) { - // Note all maps and lists have to be absolutely sorted. Otherwise we'll produce different - // results for hashes based on the OS or JVM being used. 
- md.reset(); - for (FieldSchema fs : sd.getCols()) { - md.update(fs.getName().getBytes(ENCODING)); - md.update(fs.getType().getBytes(ENCODING)); - if (fs.getComment() != null) md.update(fs.getComment().getBytes(ENCODING)); - } - if (sd.getInputFormat() != null) { - md.update(sd.getInputFormat().getBytes(ENCODING)); - } - if (sd.getOutputFormat() != null) { - md.update(sd.getOutputFormat().getBytes(ENCODING)); - } - md.update(sd.isCompressed() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); - md.update(Integer.toString(sd.getNumBuckets()).getBytes(ENCODING)); - if (sd.getSerdeInfo() != null) { - SerDeInfo serde = sd.getSerdeInfo(); - if (serde.getName() != null) { - md.update(serde.getName().getBytes(ENCODING)); - } - if (serde.getSerializationLib() != null) { - md.update(serde.getSerializationLib().getBytes(ENCODING)); - } - if (serde.getParameters() != null) { - SortedMap params = new TreeMap<>(serde.getParameters()); - for (Map.Entry param : params.entrySet()) { - md.update(param.getKey().getBytes(ENCODING)); - md.update(param.getValue().getBytes(ENCODING)); - } - } - } - if (sd.getBucketCols() != null) { - List bucketCols = new ArrayList<>(sd.getBucketCols()); - for (String bucket : bucketCols) md.update(bucket.getBytes(ENCODING)); - } - if (sd.getSortCols() != null) { - SortedSet orders = new TreeSet<>(sd.getSortCols()); - for (Order order : orders) { - md.update(order.getCol().getBytes(ENCODING)); - md.update(Integer.toString(order.getOrder()).getBytes(ENCODING)); - } - } - if (sd.getSkewedInfo() != null) { - SkewedInfo skewed = sd.getSkewedInfo(); - if (skewed.getSkewedColNames() != null) { - SortedSet colnames = new TreeSet<>(skewed.getSkewedColNames()); - for (String colname : colnames) md.update(colname.getBytes(ENCODING)); - } - if (skewed.getSkewedColValues() != null) { - SortedSet sortedOuterList = new TreeSet<>(); - for (List innerList : skewed.getSkewedColValues()) { - SortedSet sortedInnerList = new TreeSet<>(innerList); - sortedOuterList.add(StringUtils.join(sortedInnerList, ".")); - } - for (String colval : sortedOuterList) md.update(colval.getBytes(ENCODING)); - } - if (skewed.getSkewedColValueLocationMaps() != null) { - SortedMap sortedMap = new TreeMap<>(); - for (Map.Entry, String> smap : skewed.getSkewedColValueLocationMaps().entrySet()) { - SortedSet sortedKey = new TreeSet<>(smap.getKey()); - sortedMap.put(StringUtils.join(sortedKey, "."), smap.getValue()); - } - for (Map.Entry e : sortedMap.entrySet()) { - md.update(e.getKey().getBytes(ENCODING)); - md.update(e.getValue().getBytes(ENCODING)); - } - } - md.update(sd.isStoredAsSubDirectories() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); - } - - return md.digest(); - } - - static StorageDescriptor deserializeStorageDescriptor(byte[] serialized) - throws InvalidProtocolBufferException { - HbaseMetastoreProto.StorageDescriptor proto = - HbaseMetastoreProto.StorageDescriptor.parseFrom(serialized); - StorageDescriptor sd = new StorageDescriptor(); - sd.setCols(convertFieldSchemaListFromProto(proto.getColsList())); - if (proto.hasInputFormat()) sd.setInputFormat(proto.getInputFormat()); - if (proto.hasOutputFormat()) sd.setOutputFormat(proto.getOutputFormat()); - sd.setCompressed(proto.getIsCompressed()); - sd.setNumBuckets(proto.getNumBuckets()); - if (proto.hasSerdeInfo()) { - SerDeInfo serde = new SerDeInfo(); - serde.setName(proto.getSerdeInfo().hasName()? - proto.getSerdeInfo().getName():null); - serde.setSerializationLib(proto.getSerdeInfo().hasSerializationLib()? 
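hashStorageDescriptor above exists so that identical storage descriptors can be stored once and shared by hash; the sorting it insists on matters because map and set iteration order would otherwise make the digest unstable across JVMs. A reduced sketch of hashing a parameter map deterministically with MessageDigest, using SHA-256 purely as an example algorithm (the real code takes the digest as a parameter):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Map;
import java.util.TreeMap;

class DescriptorHash {
  static byte[] hashParams(Map<String, String> params) throws NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("SHA-256");
    // Sort first so two logically equal maps always produce the same digest.
    for (Map.Entry<String, String> e : new TreeMap<>(params).entrySet()) {
      md.update(e.getKey().getBytes(StandardCharsets.UTF_8));
      md.update(e.getValue().getBytes(StandardCharsets.UTF_8));
    }
    return md.digest();
  }
}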
- proto.getSerdeInfo().getSerializationLib():null); - serde.setParameters(buildParameters(proto.getSerdeInfo().getParameters())); - sd.setSerdeInfo(serde); - } - sd.setBucketCols(new ArrayList<>(proto.getBucketColsList())); - List sortCols = new ArrayList<>(); - for (HbaseMetastoreProto.StorageDescriptor.Order protoOrder : proto.getSortColsList()) { - sortCols.add(new Order(protoOrder.getColumnName(), protoOrder.getOrder())); - } - sd.setSortCols(sortCols); - if (proto.hasSkewedInfo()) { - SkewedInfo skewed = new SkewedInfo(); - skewed - .setSkewedColNames(new ArrayList<>(proto.getSkewedInfo().getSkewedColNamesList())); - for (HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList innerList : - proto.getSkewedInfo().getSkewedColValuesList()) { - skewed.addToSkewedColValues(new ArrayList<>(innerList.getSkewedColValueList())); - } - Map, String> colMaps = new HashMap<>(); - for (HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap map : - proto.getSkewedInfo().getSkewedColValueLocationMapsList()) { - colMaps.put(new ArrayList<>(map.getKeyList()), map.getValue()); - } - skewed.setSkewedColValueLocationMaps(colMaps); - sd.setSkewedInfo(skewed); - } - if (proto.hasStoredAsSubDirectories()) { - sd.setStoredAsSubDirectories(proto.getStoredAsSubDirectories()); - } - return sd; - } - - static List getPartitionKeyTypes(List parts) { - com.google.common.base.Function fieldSchemaToType = - new com.google.common.base.Function() { - public String apply(FieldSchema fs) { return fs.getType(); } - }; - return Lists.transform(parts, fieldSchemaToType); - } - - static List getPartitionNames(List parts) { - com.google.common.base.Function fieldSchemaToName = - new com.google.common.base.Function() { - public String apply(FieldSchema fs) { return fs.getName(); } - }; - return Lists.transform(parts, fieldSchemaToName); - } - - /** - * Serialize a partition - * @param part partition object - * @param sdHash hash that is being used as a key for the enclosed storage descriptor - * @return First element is the key, second is the serialized partition - */ - static byte[][] serializePartition(Partition part, List partTypes, byte[] sdHash) { - byte[][] result = new byte[2][]; - result[0] = buildPartitionKey(part.getDbName(), part.getTableName(), partTypes, part.getValues()); - HbaseMetastoreProto.Partition.Builder builder = HbaseMetastoreProto.Partition.newBuilder(); - builder - .setCreateTime(part.getCreateTime()) - .setLastAccessTime(part.getLastAccessTime()); - if (part.getSd().getLocation() != null) builder.setLocation(part.getSd().getLocation()); - if (part.getSd().getParameters() != null) { - builder.setSdParameters(buildParameters(part.getSd().getParameters())); - } - builder.setSdHash(ByteString.copyFrom(sdHash)); - if (part.getParameters() != null) builder.setParameters(buildParameters(part.getParameters())); - result[1] = builder.build().toByteArray(); - return result; - } - - static byte[] buildPartitionKey(String dbName, String tableName, List partTypes, List partVals) { - return buildPartitionKey(dbName, tableName, partTypes, partVals, false); - } - - static byte[] buildPartitionKey(String dbName, String tableName, List partTypes, List partVals, boolean endPrefix) { - Object[] components = new Object[partVals.size()]; - for (int i=0;i partTypes, Object[] components, boolean endPrefix) { - ObjectInspector javaStringOI = - PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(PrimitiveCategory.STRING); - Object[] data = new Object[components.length+2]; - List 
fois = new ArrayList(components.length+2); - boolean[] endPrefixes = new boolean[components.length+2]; - - data[0] = dbName; - fois.add(javaStringOI); - endPrefixes[0] = false; - data[1] = tableName; - fois.add(javaStringOI); - endPrefixes[1] = false; - - for (int i = 0; i < components.length; i++) { - data[i+2] = components[i]; - TypeInfo expectedType = - TypeInfoUtils.getTypeInfoFromTypeString(partTypes.get(i)); - ObjectInspector outputOI = - TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(expectedType); - fois.add(outputOI); - } - Output output = new Output(); - try { - BinarySortableSerDeWithEndPrefix.serializeStruct(output, data, fois, endPrefix); - } catch (SerDeException e) { - throw new RuntimeException("Cannot serialize partition " + StringUtils.join(components, ",")); - } - return Arrays.copyOf(output.getData(), output.getLength()); - } - - static class StorageDescriptorParts { - byte[] sdHash; - String location; - Map parameters; - Partition containingPartition; - Table containingTable; - Index containingIndex; - } - - static void assembleStorageDescriptor(StorageDescriptor sd, StorageDescriptorParts parts) { - SharedStorageDescriptor ssd = new SharedStorageDescriptor(); - ssd.setLocation(parts.location); - ssd.setParameters(parts.parameters); - ssd.setShared(sd); - if (parts.containingPartition != null) { - parts.containingPartition.setSd(ssd); - } else if (parts.containingTable != null) { - parts.containingTable.setSd(ssd); - } else if (parts.containingIndex != null) { - parts.containingIndex.setSd(ssd); - } - else { - throw new RuntimeException("Need either a partition or a table"); - } - } - - /** - * Deserialize a partition key when you know nothing about it. That is, you do not know what - * dbname, tablename it came from. - * @param key the key fetched from HBase - * @param callback A reference to the calling HBaseReadWrite object. This has to be done as a - * callback because we have to first deserialize the database name and table - * name, and then fetch the table information, and then we will know how to - * desierliaze the rest of the key. - * @return a list that includes the dbname, tablename, and partition values - * @throws IOException - */ - static List deserializePartitionKey(byte[] key, HBaseReadWrite callback) - throws IOException { - List keyParts = - desierliazeDbNameTableNameFromPartitionKey(key, callback.getConf()); - Table table = callback.getTable(keyParts.get(0), keyParts.get(1)); - keyParts.addAll(deserializePartitionKey(table.getPartitionKeys(), key, callback.getConf())); - return keyParts; - } - - /** - * Deserialize a partition. This version should be used when the partition key is not already - * known and the database and table name are not known either (eg a full scan). Because the - * dbname and tablename (and thus the partition columns) are not known a priori this version - * has to go fetch the table after it figures out which table. If you already have the table - * object you should use - * {@link #deserializePartition(String,String,List,byte[],byte[],Configuration)} - * @param key the key fetched from HBase - * @param serialized the value fetched from HBase - * @param callback A reference to the calling HBaseReadWrite object. This has to be done as a - * callback because we have to first deserialize the database name and table - * name, and then fetch the table information, and then we will know how to - * desierliaze the rest of the key. 
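assembleStorageDescriptor above stitches a partition (or table, or index) back together from two pieces: the shared, hash-addressed descriptor and the per-object bits (location, SD parameters) that were stored with the object itself. A toy sketch of that split, with hypothetical Shared/Parts classes in place of SharedStorageDescriptor and StorageDescriptorParts:

import java.util.Map;

class SharedSdSketch {
  // The deduplicated piece, stored once per distinct hash.
  static class Shared { String inputFormat; String outputFormat; }

  // The per-partition piece, stored with each partition row.
  static class Parts { String location; Map<String, String> parameters; Shared shared; }

  // Reassemble a full descriptor view for one partition.
  static String describe(Parts p) {
    return p.location + " via " + p.shared.inputFormat + "/" + p.shared.outputFormat;
  }
}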
- * @return A struct that contains the partition plus parts of the storage descriptor - */ - static StorageDescriptorParts deserializePartition(byte[] key, byte[] serialized, - HBaseReadWrite callback) - throws IOException { - List dbNameTableName = - desierliazeDbNameTableNameFromPartitionKey(key, callback.getConf()); - Table table = callback.getTable(dbNameTableName.get(0), dbNameTableName.get(1)); - List keys = deserializePartitionKey(table.getPartitionKeys(), key, callback.getConf()); - return deserializePartition(dbNameTableName.get(0), dbNameTableName.get(1), keys, serialized); - } - - /** - * Deserialize a partition. This version should be used when you know the dbname and tablename - * but not the partition values. - * @param dbName database this partition is in - * @param tableName table this partition is in - * @param partitions schemas for the partition columns of this table - * @param key key fetched from HBase - * @param serialized serialized version of the partition - * @param conf configuration file - * @return - * @throws InvalidProtocolBufferException - */ - static StorageDescriptorParts deserializePartition(String dbName, String tableName, - List partitions, byte[] key, - byte[] serialized, Configuration conf) - throws InvalidProtocolBufferException { - List keys = deserializePartitionKey(partitions, key, conf); - return deserializePartition(dbName, tableName, keys, serialized); - } - - /** - * Deserialize a partition. This version should be used when the partition key is - * known (eg a get). - * @param dbName database name - * @param tableName table name - * @param partVals partition values - * @param serialized the value fetched from HBase - * @return A struct that contains the partition plus parts of the storage descriptor - */ - static StorageDescriptorParts deserializePartition(String dbName, String tableName, - List partVals, byte[] serialized) - throws InvalidProtocolBufferException { - HbaseMetastoreProto.Partition proto = HbaseMetastoreProto.Partition.parseFrom(serialized); - Partition part = new Partition(); - StorageDescriptorParts sdParts = new StorageDescriptorParts(); - sdParts.containingPartition = part; - part.setDbName(dbName); - part.setTableName(tableName); - part.setValues(partVals); - part.setCreateTime((int)proto.getCreateTime()); - part.setLastAccessTime((int)proto.getLastAccessTime()); - if (proto.hasLocation()) sdParts.location = proto.getLocation(); - if (proto.hasSdParameters()) sdParts.parameters = buildParameters(proto.getSdParameters()); - sdParts.sdHash = proto.getSdHash().toByteArray(); - if (proto.hasParameters()) part.setParameters(buildParameters(proto.getParameters())); - return sdParts; - } - - static String[] deserializeKey(byte[] key) { - String k = new String(key, ENCODING); - return k.split(KEY_SEPARATOR_STR); - } - - private static List desierliazeDbNameTableNameFromPartitionKey(byte[] key, - Configuration conf) { - StringBuilder names = new StringBuilder(); - names.append("dbName,tableName,"); - StringBuilder types = new StringBuilder(); - types.append("string,string,"); - BinarySortableSerDe serDe = new BinarySortableSerDe(); - Properties props = new Properties(); - props.setProperty(serdeConstants.LIST_COLUMNS, names.toString()); - props.setProperty(serdeConstants.LIST_COLUMN_TYPES, types.toString()); - try { - serDe.initialize(conf, props); - List deserializedkeys = ((List)serDe.deserialize(new BytesWritable(key))).subList(0, 2); - List keys = new ArrayList<>(); - for (int i=0;i deserializePartitionKey(List partitions, byte[] 
key, - Configuration conf) { - StringBuilder names = new StringBuilder(); - names.append("dbName,tableName,"); - StringBuilder types = new StringBuilder(); - types.append("string,string,"); - for (int i=0;i partitionKeys = new ArrayList(); - for (int i=0;i bits = new ArrayList<>(bitSet.length); - for (int i = 0; i < bitSet.length; i++) bits.add(bitSet[i]); - HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter protoBloom = - HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.newBuilder() - .setNumBits(bloom.getBitSize()) - .setNumFuncs(bloom.getNumHashFunctions()) - .addAllBits(bits) - .build(); - - HbaseMetastoreProto.AggrStatsBloomFilter proto = - HbaseMetastoreProto.AggrStatsBloomFilter.newBuilder() - .setDbName(ByteString.copyFrom(dbName.getBytes(ENCODING))) - .setTableName(ByteString.copyFrom(tableName.getBytes(ENCODING))) - .setBloomFilter(protoBloom) - .setAggregatedAt(System.currentTimeMillis()) - .build(); - - return proto.toByteArray(); - } - - private static HbaseMetastoreProto.ColumnStats protoBufStatsForOneColumn( - ColumnStatistics partitionColumnStats, ColumnStatisticsObj colStats) throws IOException { - HbaseMetastoreProto.ColumnStats.Builder builder = HbaseMetastoreProto.ColumnStats.newBuilder(); - if (partitionColumnStats != null) { - builder.setLastAnalyzed(partitionColumnStats.getStatsDesc().getLastAnalyzed()); - } - assert colStats.getColType() != null; - builder.setColumnType(colStats.getColType()); - assert colStats.getColName() != null; - builder.setColumnName(colStats.getColName()); - - ColumnStatisticsData colData = colStats.getStatsData(); - switch (colData.getSetField()) { - case BOOLEAN_STATS: - BooleanColumnStatsData boolData = colData.getBooleanStats(); - builder.setNumNulls(boolData.getNumNulls()); - builder.setBoolStats(HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder() - .setNumTrues(boolData.getNumTrues()).setNumFalses(boolData.getNumFalses()).build()); - break; - - case LONG_STATS: - LongColumnStatsData longData = colData.getLongStats(); - builder.setNumNulls(longData.getNumNulls()); - builder.setNumDistinctValues(longData.getNumDVs()); - if (longData.isSetBitVectors()) { - builder.setBitVectors(longData.getBitVectors()); - } - builder.setLongStats(HbaseMetastoreProto.ColumnStats.LongStats.newBuilder() - .setLowValue(longData.getLowValue()).setHighValue(longData.getHighValue()).build()); - break; - - case DOUBLE_STATS: - DoubleColumnStatsData doubleData = colData.getDoubleStats(); - builder.setNumNulls(doubleData.getNumNulls()); - builder.setNumDistinctValues(doubleData.getNumDVs()); - if (doubleData.isSetBitVectors()) { - builder.setBitVectors(doubleData.getBitVectors()); - } - builder.setDoubleStats(HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder() - .setLowValue(doubleData.getLowValue()).setHighValue(doubleData.getHighValue()).build()); - break; - - case STRING_STATS: - StringColumnStatsData stringData = colData.getStringStats(); - builder.setNumNulls(stringData.getNumNulls()); - builder.setNumDistinctValues(stringData.getNumDVs()); - if (stringData.isSetBitVectors()) { - builder.setBitVectors(stringData.getBitVectors()); - } - builder.setStringStats(HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() - .setMaxColLength(stringData.getMaxColLen()).setAvgColLength(stringData.getAvgColLen()) - .build()); - break; - - case BINARY_STATS: - BinaryColumnStatsData binaryData = colData.getBinaryStats(); - builder.setNumNulls(binaryData.getNumNulls()); - builder.setBinaryStats(HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() - 
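serializeBloomFilter above flattens the filter's long[] bit set into a repeated field so it can travel inside a protobuf message. java.util.BitSet shows the same packing and unpacking in plain Java, which is roughly what the aggregate-stats invalidation path relies on:

import java.util.BitSet;

class BloomBits {
  public static void main(String[] args) {
    BitSet bits = new BitSet(128);
    bits.set(3);
    bits.set(77);

    long[] packed = bits.toLongArray();        // what gets copied into the proto's bit list
    BitSet restored = BitSet.valueOf(packed);  // reverse direction on the read path

    System.out.println(restored.get(77));      // true
    System.out.println(restored.get(78));      // false
  }
}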
.setMaxColLength(binaryData.getMaxColLen()).setAvgColLength(binaryData.getAvgColLen()) - .build()); - break; - - case DECIMAL_STATS: - DecimalColumnStatsData decimalData = colData.getDecimalStats(); - builder.setNumNulls(decimalData.getNumNulls()); - builder.setNumDistinctValues(decimalData.getNumDVs()); - if (decimalData.isSetBitVectors()) { - builder.setBitVectors(decimalData.getBitVectors()); - } - if (decimalData.getLowValue() != null && decimalData.getHighValue() != null) { - builder.setDecimalStats( - HbaseMetastoreProto.ColumnStats.DecimalStats - .newBuilder() - .setLowValue( - HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() - .setUnscaled(ByteString.copyFrom(decimalData.getLowValue().getUnscaled())) - .setScale(decimalData.getLowValue().getScale()).build()) - .setHighValue( - HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() - .setUnscaled(ByteString.copyFrom(decimalData.getHighValue().getUnscaled())) - .setScale(decimalData.getHighValue().getScale()).build())).build(); - } else { - builder.setDecimalStats(HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder().clear() - .build()); - } - break; - - default: - throw new RuntimeException("Woh, bad. Unknown stats type!"); - } - return builder.build(); - } - - static byte[] serializeStatsForOneColumn(ColumnStatistics partitionColumnStats, - ColumnStatisticsObj colStats) throws IOException { - return protoBufStatsForOneColumn(partitionColumnStats, colStats).toByteArray(); - } - - static ColumnStatisticsObj deserializeStatsForOneColumn(ColumnStatistics partitionColumnStats, - byte[] bytes) throws IOException { - HbaseMetastoreProto.ColumnStats proto = HbaseMetastoreProto.ColumnStats.parseFrom(bytes); - return statsForOneColumnFromProtoBuf(partitionColumnStats, proto); - } - - private static ColumnStatisticsObj - statsForOneColumnFromProtoBuf(ColumnStatistics partitionColumnStats, - HbaseMetastoreProto.ColumnStats proto) throws IOException { - ColumnStatisticsObj colStats = new ColumnStatisticsObj(); - long lastAnalyzed = proto.getLastAnalyzed(); - if (partitionColumnStats != null) { - partitionColumnStats.getStatsDesc().setLastAnalyzed( - Math.max(lastAnalyzed, partitionColumnStats.getStatsDesc().getLastAnalyzed())); - } - colStats.setColType(proto.getColumnType()); - colStats.setColName(proto.getColumnName()); - - ColumnStatisticsData colData = new ColumnStatisticsData(); - if (proto.hasBoolStats()) { - BooleanColumnStatsData boolData = new BooleanColumnStatsData(); - boolData.setNumTrues(proto.getBoolStats().getNumTrues()); - boolData.setNumFalses(proto.getBoolStats().getNumFalses()); - boolData.setNumNulls(proto.getNumNulls()); - colData.setBooleanStats(boolData); - } else if (proto.hasLongStats()) { - LongColumnStatsData longData = new LongColumnStatsData(); - if (proto.getLongStats().hasLowValue()) { - longData.setLowValue(proto.getLongStats().getLowValue()); - } - if (proto.getLongStats().hasHighValue()) { - longData.setHighValue(proto.getLongStats().getHighValue()); - } - longData.setNumNulls(proto.getNumNulls()); - longData.setNumDVs(proto.getNumDistinctValues()); - longData.setBitVectors(proto.getBitVectors()); - colData.setLongStats(longData); - } else if (proto.hasDoubleStats()) { - DoubleColumnStatsData doubleData = new DoubleColumnStatsData(); - if (proto.getDoubleStats().hasLowValue()) { - doubleData.setLowValue(proto.getDoubleStats().getLowValue()); - } - if (proto.getDoubleStats().hasHighValue()) { - doubleData.setHighValue(proto.getDoubleStats().getHighValue()); - } - 
doubleData.setNumNulls(proto.getNumNulls()); - doubleData.setNumDVs(proto.getNumDistinctValues()); - doubleData.setBitVectors(proto.getBitVectors()); - colData.setDoubleStats(doubleData); - } else if (proto.hasStringStats()) { - StringColumnStatsData stringData = new StringColumnStatsData(); - stringData.setMaxColLen(proto.getStringStats().getMaxColLength()); - stringData.setAvgColLen(proto.getStringStats().getAvgColLength()); - stringData.setNumNulls(proto.getNumNulls()); - stringData.setNumDVs(proto.getNumDistinctValues()); - stringData.setBitVectors(proto.getBitVectors()); - colData.setStringStats(stringData); - } else if (proto.hasBinaryStats()) { - BinaryColumnStatsData binaryData = new BinaryColumnStatsData(); - binaryData.setMaxColLen(proto.getBinaryStats().getMaxColLength()); - binaryData.setAvgColLen(proto.getBinaryStats().getAvgColLength()); - binaryData.setNumNulls(proto.getNumNulls()); - colData.setBinaryStats(binaryData); - } else if (proto.hasDecimalStats()) { - DecimalColumnStatsData decimalData = new DecimalColumnStatsData(); - if (proto.getDecimalStats().hasHighValue()) { - Decimal hiVal = new Decimal(); - hiVal.setUnscaled(proto.getDecimalStats().getHighValue().getUnscaled().toByteArray()); - hiVal.setScale((short) proto.getDecimalStats().getHighValue().getScale()); - decimalData.setHighValue(hiVal); - } - if (proto.getDecimalStats().hasLowValue()) { - Decimal loVal = new Decimal(); - loVal.setUnscaled(proto.getDecimalStats().getLowValue().getUnscaled().toByteArray()); - loVal.setScale((short) proto.getDecimalStats().getLowValue().getScale()); - decimalData.setLowValue(loVal); - } - decimalData.setNumNulls(proto.getNumNulls()); - decimalData.setNumDVs(proto.getNumDistinctValues()); - decimalData.setBitVectors(proto.getBitVectors()); - colData.setDecimalStats(decimalData); - } else { - throw new RuntimeException("Woh, bad. Unknown stats type!"); - } - colStats.setStatsData(colData); - return colStats; - } - - static byte[] serializeAggrStats(AggrStats aggrStats) throws IOException { - List protoColStats = - new ArrayList<>(aggrStats.getColStatsSize()); - for (ColumnStatisticsObj cso : aggrStats.getColStats()) { - protoColStats.add(protoBufStatsForOneColumn(null, cso)); - } - return HbaseMetastoreProto.AggrStats.newBuilder() - .setPartsFound(aggrStats.getPartsFound()) - .addAllColStats(protoColStats) - .build() - .toByteArray(); - } - - static AggrStats deserializeAggrStats(byte[] serialized) throws IOException { - HbaseMetastoreProto.AggrStats protoAggrStats = - HbaseMetastoreProto.AggrStats.parseFrom(serialized); - AggrStats aggrStats = new AggrStats(); - aggrStats.setPartsFound(protoAggrStats.getPartsFound()); - for (HbaseMetastoreProto.ColumnStats protoCS : protoAggrStats.getColStatsList()) { - aggrStats.addToColStats(statsForOneColumnFromProtoBuf(null, protoCS)); - } - return aggrStats; - } - - /** - * Serialize a delegation token - * @param tokenIdentifier - * @param delegationToken - * @return two byte arrays, first contains the key, the second the serialized value. - */ - static byte[][] serializeDelegationToken(String tokenIdentifier, String delegationToken) { - byte[][] result = new byte[2][]; - result[0] = buildKey(tokenIdentifier); - result[1] = HbaseMetastoreProto.DelegationToken.newBuilder() - .setTokenStr(delegationToken) - .build() - .toByteArray(); - return result; - } - - /** - * Deserialize a delegation token. - * @param value value fetched from hbase - * @return A delegation token. 
- * @throws InvalidProtocolBufferException - */ - static String deserializeDelegationToken(byte[] value) throws InvalidProtocolBufferException { - HbaseMetastoreProto.DelegationToken protoToken = - HbaseMetastoreProto.DelegationToken.parseFrom(value); - return protoToken.getTokenStr(); - } - - /** - * Serialize a master key - * @param seqNo - * @param key - * @return two byte arrays, first contains the key, the second the serialized value. - */ - static byte[][] serializeMasterKey(Integer seqNo, String key) { - byte[][] result = new byte[2][]; - result[0] = buildKey(seqNo.toString()); - result[1] = HbaseMetastoreProto.MasterKey.newBuilder() - .setMasterKey(key) - .build() - .toByteArray(); - return result; - } - - /** - * Deserialize a master key. - * @param value value fetched from hbase - * @return A master key - * @throws InvalidProtocolBufferException - */ - static String deserializeMasterKey(byte[] value) throws InvalidProtocolBufferException { - HbaseMetastoreProto.MasterKey protoKey = HbaseMetastoreProto.MasterKey.parseFrom(value); - return protoKey.getMasterKey(); - } - - /** - * Serialize the primary key for a table. - * @param pk Primary key columns. It is expected that all of these match to one pk, since - * anything else is meaningless. - * @return two byte arrays, first containts the hbase key, the second the serialized value. - */ - static byte[][] serializePrimaryKey(List pk) { - // First, figure out the dbName and tableName. We expect this to match for all list entries. - byte[][] result = new byte[2][]; - String dbName = pk.get(0).getTable_db(); - String tableName = pk.get(0).getTable_name(); - result[0] = buildKey(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); - - HbaseMetastoreProto.PrimaryKey.Builder builder = HbaseMetastoreProto.PrimaryKey.newBuilder(); - // Encode the primary key, if present - builder.setPkName(pk.get(0).getPk_name()); - builder.setEnableConstraint(pk.get(0).isEnable_cstr()); - builder.setValidateConstraint(pk.get(0).isValidate_cstr()); - builder.setRelyConstraint(pk.get(0).isRely_cstr()); - - for (SQLPrimaryKey pkcol : pk) { - HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.Builder pkColBuilder = - HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn.newBuilder(); - pkColBuilder.setColumnName(pkcol.getColumn_name()); - pkColBuilder.setKeySeq(pkcol.getKey_seq()); - builder.addCols(pkColBuilder); - } - - result[1] = builder.build().toByteArray(); - return result; - } - - /** - * Serialize the foreign key(s) for a table. - * @param fks Foreign key columns. These may belong to multiple foreign keys. - * @return two byte arrays, first containts the key, the second the serialized value. - */ - static byte[][] serializeForeignKeys(List fks) { - // First, figure out the dbName and tableName. We expect this to match for all list entries. - byte[][] result = new byte[2][]; - String dbName = fks.get(0).getFktable_db(); - String tableName = fks.get(0).getFktable_name(); - result[0] = buildKey(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); - - HbaseMetastoreProto.ForeignKeys.Builder builder = HbaseMetastoreProto.ForeignKeys.newBuilder(); - - // Encode any foreign keys we find. This can be complex because there may be more than - // one foreign key in here, so we need to detect that. 
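Each serializer above follows the same convention: element [0] of the returned pair is the HBase row key, element [1] is the protobuf-encoded value. A rough pure-Java sketch of that shape; the separator byte and the lower-casing are assumptions, since buildKey and HiveStringUtils.normalizeIdentifier are not shown in this hunk:

import java.nio.charset.StandardCharsets;
import java.util.Locale;

public class RowKeySketch {
  // Assumption: a single non-printable separator byte between key components.
  private static final char SEPARATOR = 1;

  // Stand-in for buildKey(normalizeIdentifier(dbName), normalizeIdentifier(tableName)).
  static byte[] buildKey(String... components) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < components.length; i++) {
      if (i > 0) sb.append(SEPARATOR);
      sb.append(components[i].toLowerCase(Locale.ROOT)); // identifiers are normalized before keying
    }
    return sb.toString().getBytes(StandardCharsets.UTF_8);
  }

  // Same two-element shape as serializeMasterKey: [0] = row key, [1] = encoded value.
  static byte[][] serializeKeyValue(String dbName, String tableName, byte[] encodedValue) {
    byte[][] result = new byte[2][];
    result[0] = buildKey(dbName, tableName);
    result[1] = encodedValue; // the real code builds this with a protobuf Builder
    return result;
  }
}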
- Map fkBuilders = new HashMap<>(); - - for (SQLForeignKey fkcol : fks) { - HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder fkBuilder = - fkBuilders.get(fkcol.getFk_name()); - if (fkBuilder == null) { - // We haven't seen this key before, so add it - fkBuilder = HbaseMetastoreProto.ForeignKeys.ForeignKey.newBuilder(); - fkBuilder.setFkName(fkcol.getFk_name()); - fkBuilder.setReferencedDbName(fkcol.getPktable_db()); - assert dbName.equals(fkcol.getFktable_db()) : "You switched databases on me!"; - fkBuilder.setReferencedTableName(fkcol.getPktable_name()); - assert tableName.equals(fkcol.getFktable_name()) : "You switched tables on me!"; - fkBuilder.setReferencedPkName(fkcol.getPk_name()); - fkBuilder.setUpdateRule(fkcol.getUpdate_rule()); - fkBuilder.setDeleteRule(fkcol.getDelete_rule()); - fkBuilder.setEnableConstraint(fkcol.isEnable_cstr()); - fkBuilder.setValidateConstraint(fkcol.isValidate_cstr()); - fkBuilder.setRelyConstraint(fkcol.isRely_cstr()); - fkBuilders.put(fkcol.getFk_name(), fkBuilder); - } - HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.Builder fkColBuilder = - HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn.newBuilder(); - fkColBuilder.setColumnName(fkcol.getFkcolumn_name()); - fkColBuilder.setReferencedColumnName(fkcol.getPkcolumn_name()); - fkColBuilder.setKeySeq(fkcol.getKey_seq()); - fkBuilder.addCols(fkColBuilder); - } - for (HbaseMetastoreProto.ForeignKeys.ForeignKey.Builder fkBuilder : fkBuilders.values()) { - builder.addFks(fkBuilder); - } - result[1] = builder.build().toByteArray(); - return result; - } - - /** - * Serialize the unique constraint(s) for a table. - * @param uks Unique constraint columns. These may belong to multiple unique constraints. - * @return two byte arrays, first contains the key, the second the serialized value. - */ - static byte[][] serializeUniqueConstraints(List uks) { - // First, figure out the dbName and tableName. We expect this to match for all list entries. - byte[][] result = new byte[2][]; - String dbName = uks.get(0).getTable_db(); - String tableName = uks.get(0).getTable_name(); - result[0] = buildKey(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); - - HbaseMetastoreProto.UniqueConstraints.Builder builder = - HbaseMetastoreProto.UniqueConstraints.newBuilder(); - - // Encode any foreign keys we find. This can be complex because there may be more than - // one foreign key in here, so we need to detect that. 
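serializeForeignKeys above, and serializeUniqueConstraints/serializeNotNullConstraints below, all use the same trick: walk a flat list of constraint columns and group them by constraint name in a map of builders. The same idea in plain Java (the ColRow class and the sample names are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ConstraintGrouping {
  // Illustrative flat row: one column belonging to one named constraint.
  static final class ColRow {
    final String constraintName;
    final String columnName;
    final int keySeq;
    ColRow(String constraintName, String columnName, int keySeq) {
      this.constraintName = constraintName;
      this.columnName = columnName;
      this.keySeq = keySeq;
    }
  }

  // Group flat rows by constraint name, mirroring the fkBuilders/ukBuilders/nnBuilders maps.
  static Map<String, List<ColRow>> groupByConstraint(List<ColRow> rows) {
    Map<String, List<ColRow>> byName = new LinkedHashMap<>();
    for (ColRow row : rows) {
      byName.computeIfAbsent(row.constraintName, k -> new ArrayList<>()).add(row);
    }
    return byName;
  }

  public static void main(String[] args) {
    List<ColRow> rows = Arrays.asList(
        new ColRow("fk_orders_customers", "customer_id", 1),
        new ColRow("fk_orders_customers", "customer_region", 2),
        new ColRow("fk_orders_products", "product_id", 1));
    System.out.println(groupByConstraint(rows).keySet()); // [fk_orders_customers, fk_orders_products]
  }
}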
- Map ukBuilders = new HashMap<>(); - - for (SQLUniqueConstraint ukcol : uks) { - HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder ukBuilder = - ukBuilders.get(ukcol.getUk_name()); - if (ukBuilder == null) { - // We haven't seen this key before, so add it - ukBuilder = HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.newBuilder(); - ukBuilder.setUkName(ukcol.getUk_name()); - ukBuilder.setEnableConstraint(ukcol.isEnable_cstr()); - ukBuilder.setValidateConstraint(ukcol.isValidate_cstr()); - ukBuilder.setRelyConstraint(ukcol.isRely_cstr()); - ukBuilders.put(ukcol.getUk_name(), ukBuilder); - } - assert dbName.equals(ukcol.getTable_db()) : "You switched databases on me!"; - assert tableName.equals(ukcol.getTable_name()) : "You switched tables on me!"; - HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.Builder ukColBuilder = - HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn.newBuilder(); - ukColBuilder.setColumnName(ukcol.getColumn_name()); - ukColBuilder.setKeySeq(ukcol.getKey_seq()); - ukBuilder.addCols(ukColBuilder); - } - for (HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.Builder ukBuilder : ukBuilders.values()) { - builder.addUks(ukBuilder); - } - result[1] = builder.build().toByteArray(); - return result; - } - - /** - * Serialize the not null constraint(s) for a table. - * @param nns Not null constraint columns. These may belong to multiple constraints. - * @return two byte arrays, first contains the constraint, the second the serialized value. - */ - static byte[][] serializeNotNullConstraints(List nns) { - // First, figure out the dbName and tableName. We expect this to match for all list entries. - byte[][] result = new byte[2][]; - String dbName = nns.get(0).getTable_db(); - String tableName = nns.get(0).getTable_name(); - result[0] = buildKey(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); - - HbaseMetastoreProto.NotNullConstraints.Builder builder = - HbaseMetastoreProto.NotNullConstraints.newBuilder(); - - // Encode any foreign keys we find. This can be complex because there may be more than - // one foreign key in here, so we need to detect that. 
- Map nnBuilders = new HashMap<>(); - - for (SQLNotNullConstraint nncol : nns) { - HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder nnBuilder = - nnBuilders.get(nncol.getNn_name()); - if (nnBuilder == null) { - // We haven't seen this key before, so add it - nnBuilder = HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.newBuilder(); - nnBuilder.setNnName(nncol.getNn_name()); - nnBuilder.setEnableConstraint(nncol.isEnable_cstr()); - nnBuilder.setValidateConstraint(nncol.isValidate_cstr()); - nnBuilder.setRelyConstraint(nncol.isRely_cstr()); - nnBuilders.put(nncol.getNn_name(), nnBuilder); - } - assert dbName.equals(nncol.getTable_db()) : "You switched databases on me!"; - assert tableName.equals(nncol.getTable_name()) : "You switched tables on me!"; - HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.Builder nnColBuilder = - HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn.newBuilder(); - nnColBuilder.setColumnName(nncol.getColumn_name()); - nnBuilder.addCols(nnColBuilder); - } - for (HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.Builder nnBuilder : nnBuilders.values()) { - builder.addNns(nnBuilder); - } - result[1] = builder.build().toByteArray(); - return result; - } - - static List deserializePrimaryKey(String dbName, String tableName, byte[] value) - throws InvalidProtocolBufferException { - HbaseMetastoreProto.PrimaryKey proto = HbaseMetastoreProto.PrimaryKey.parseFrom(value); - List result = new ArrayList<>(); - for (HbaseMetastoreProto.PrimaryKey.PrimaryKeyColumn protoPkCol : proto.getColsList()) { - result.add(new SQLPrimaryKey(dbName, tableName, protoPkCol.getColumnName(), - protoPkCol.getKeySeq(), proto.getPkName(), proto.getEnableConstraint(), - proto.getValidateConstraint(), proto.getRelyConstraint())); - } - - return result; - } - - static List deserializeUniqueConstraint(String dbName, String tableName, byte[] value) - throws InvalidProtocolBufferException { - List result = new ArrayList<>(); - HbaseMetastoreProto.UniqueConstraints protoConstraints = - HbaseMetastoreProto.UniqueConstraints.parseFrom(value); - - for (HbaseMetastoreProto.UniqueConstraints.UniqueConstraint proto : protoConstraints.getUksList()) { - for (HbaseMetastoreProto.UniqueConstraints.UniqueConstraint.UniqueConstraintColumn protoUkCol : - proto.getColsList()) { - result.add(new SQLUniqueConstraint(dbName, tableName, protoUkCol.getColumnName(), - protoUkCol.getKeySeq(), - proto.getUkName(), proto.getEnableConstraint(), - proto.getValidateConstraint(), proto.getRelyConstraint())); - } - } - return result; - } - - static List deserializeNotNullConstraint(String dbName, String tableName, byte[] value) - throws InvalidProtocolBufferException { - List result = new ArrayList<>(); - HbaseMetastoreProto.NotNullConstraints protoConstraints = - HbaseMetastoreProto.NotNullConstraints.parseFrom(value); - - for (HbaseMetastoreProto.NotNullConstraints.NotNullConstraint proto : protoConstraints.getNnsList()) { - for (HbaseMetastoreProto.NotNullConstraints.NotNullConstraint.NotNullConstraintColumn protoNnCol : - proto.getColsList()) { - result.add(new SQLNotNullConstraint(dbName, tableName, protoNnCol.getColumnName(), - proto.getNnName(), proto.getEnableConstraint(), - proto.getValidateConstraint(), proto.getRelyConstraint())); - } - } - return result; - } - - static List deserializeForeignKeys(String dbName, String tableName, byte[] value) - throws InvalidProtocolBufferException { - List result = new ArrayList<>(); - 
HbaseMetastoreProto.ForeignKeys protoConstraints = - HbaseMetastoreProto.ForeignKeys.parseFrom(value); - - for (HbaseMetastoreProto.ForeignKeys.ForeignKey protoFk : protoConstraints.getFksList()) { - for (HbaseMetastoreProto.ForeignKeys.ForeignKey.ForeignKeyColumn protoFkCol : - protoFk.getColsList()) { - result.add(new SQLForeignKey(protoFk.getReferencedDbName(), protoFk.getReferencedTableName(), - protoFkCol.getReferencedColumnName(), dbName, tableName, protoFkCol.getColumnName(), - protoFkCol.getKeySeq(), protoFk.getUpdateRule(), protoFk.getDeleteRule(), - protoFk.getFkName(), protoFk.getReferencedPkName(), protoFk.getEnableConstraint(), - protoFk.getValidateConstraint(), protoFk.getRelyConstraint())); - } - } - return result; - } - - /** - * @param keyStart byte array representing the start prefix - * @return byte array corresponding to the next possible prefix - */ - static byte[] getEndPrefix(byte[] keyStart) { - if (keyStart == null) { - return null; - } - // Since this is a prefix and not full key, the usual hbase technique of - // appending 0 byte does not work. Instead of that, increment the last byte. - byte[] keyEnd = Arrays.copyOf(keyStart, keyStart.length); - keyEnd[keyEnd.length - 1]++; - return keyEnd; - } - - static byte[] makeLongKey(long v) { - byte[] b = new byte[8]; - b[0] = (byte)(v >>> 56); - b[1] = (byte)(v >>> 48); - b[2] = (byte)(v >>> 40); - b[3] = (byte)(v >>> 32); - b[4] = (byte)(v >>> 24); - b[5] = (byte)(v >>> 16); - b[6] = (byte)(v >>> 8); - b[7] = (byte)(v >>> 0); - return b; - } - - public static double getDoubleValue(Decimal decimal) { - return new BigDecimal(new BigInteger(decimal.getUnscaled()), decimal.getScale()).doubleValue(); - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/ObjectCache.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/ObjectCache.java deleted file mode 100644 index 5bbed5d..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/ObjectCache.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import java.util.HashMap; -import java.util.Map; - -/** - * A generic class for caching objects obtained from HBase. Currently a set of - * convenience methods around a {@link java.util.HashMap} with a max size but built - * as a separate class in case we want to switch out the implementation to something more - * efficient. The cache has a max size; when this is exceeded any additional entries are dropped - * on the floor. - * - * This cache is local to a particular thread and thus is not synchronized. 
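getEndPrefix above bumps the last byte of the prefix to get an exclusive scan end; that wraps to 0x00 when the last byte is already 0xFF, so a carry-aware variant is the safer general form. makeLongKey writes the long big-endian so that HBase's unsigned lexicographic byte order matches numeric order for non-negative values. A small stand-alone sketch of both ideas (method names are mine):

import java.util.Arrays;

public class KeyRangeSketch {
  // Exclusive end of a prefix scan: increment the last byte that is not 0xFF and
  // drop everything after it. A prefix of all 0xFF bytes has no finite end key.
  static byte[] endOfPrefixScan(byte[] prefix) {
    byte[] end = Arrays.copyOf(prefix, prefix.length);
    for (int i = end.length - 1; i >= 0; i--) {
      if (end[i] != (byte) 0xFF) {
        end[i]++;
        return Arrays.copyOf(end, i + 1);
      }
    }
    return null; // caller should scan to the end of the table
  }

  // Big-endian encoding, same as makeLongKey: byte order equals numeric order
  // for non-negative longs.
  static byte[] longKey(long v) {
    byte[] b = new byte[8];
    for (int i = 7; i >= 0; i--) {
      b[i] = (byte) v;
      v >>>= 8;
    }
    return b;
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(endOfPrefixScan(new byte[]{'d', 'b', (byte) 0xFF}))); // [100, 99]
    System.out.println(Arrays.toString(longKey(1L)));                                        // [0, 0, 0, 0, 0, 0, 0, 1]
  }
}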
It is intended to be - * flushed before a query begins to make sure it doesn't carry old versions of objects between - * queries (that is, an object may have changed between two queries, we want to get the newest - * version). - */ -class ObjectCache { - private Map cache; - private final int maxSize; - private Counter hits; - private Counter misses; - private Counter overflows; - - /** - * - * @param max maximum number of objects to store in the cache. When max is reached, eviction - * policy is MRU. - * @param hits counter to increment when we find an element in the cache - * @param misses counter to increment when we do not find an element in the cache - * @param overflows counter to increment when we do not have room for an element in the cache - */ - ObjectCache(int max, Counter hits, Counter misses, Counter overflows) { - maxSize = max; - cache = new HashMap(); - this.hits = hits; - this.misses = misses; - this.overflows = overflows; - } - - void put(K key, V value) { - if (cache.size() < maxSize) { - cache.put(key, value); - } else { - overflows.incr(); - } - } - - V get(K key) { - V val = cache.get(key); - if (val == null) misses.incr(); - else hits.incr(); - return val; - } - - void remove(K key) { - cache.remove(key); - } - - void flush() { - cache.clear(); - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionCache.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionCache.java deleted file mode 100644 index 08d060f..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionCache.java +++ /dev/null @@ -1,168 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.hadoop.hive.common.ObjectPair; -import org.apache.hadoop.hive.metastore.api.Partition; - -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * A cache for partition objects. This is separate from - * {@link org.apache.hadoop.hive.metastore.hbase.ObjectCache} because we need to access it - * differently (always by table) and because we need to be able to track whether we are caching - * all of the partitions for a table or not. Like ObjectCache it is local to a particular thread - * and thus not synchronized. Also like ObjectCache it is intended to be flushed before each query. - */ -class PartitionCache { - // This is a trie. The key to the first map is (dbname, tablename), since partitions are - // always accessed within the context of the table they belong to. The second map maps - // partition values (not names) to partitions. 
- private Map, TrieValue> cache; - private final int maxSize; - private int cacheSize; - private Counter misses; - private Counter hits; - private Counter overflows; - - /** - * - * @param max maximum number of objects to store in the cache. When max is reached, eviction - * policy is MRU. - * @param hits counter to increment when we find an element in the cache - * @param misses counter to increment when we do not find an element in the cache - * @param overflows counter to increment when we do not have room for an element in the cache - */ - PartitionCache(int max, Counter hits, Counter misses, Counter overflows) { - maxSize = max; - cache = new HashMap, TrieValue>(); - cacheSize = 0; - this.hits = hits; - this.misses = misses; - this.overflows = overflows; - } - - /** - * Put a single partition into the cache - * @param dbName - * @param tableName - * @param part - */ - void put(String dbName, String tableName, Partition part) { - if (cacheSize < maxSize) { - ObjectPair key = new ObjectPair(dbName, tableName); - TrieValue entry = cache.get(key); - if (entry == null) { - entry = new TrieValue(false); - cache.put(key, entry); - } - entry.map.put(part.getValues(), part); - cacheSize++; - } else { - overflows.incr(); - } - } - - /** - * - * @param dbName - * @param tableName - * @param parts - * @param allForTable if true indicates that all partitions for this table are present - */ - void put(String dbName, String tableName, List parts, boolean allForTable) { - if (cacheSize + parts.size() < maxSize) { - ObjectPair key = new ObjectPair(dbName, tableName); - TrieValue entry = cache.get(key); - if (entry == null) { - entry = new TrieValue(allForTable); - cache.put(key, entry); - } - for (Partition part : parts) entry.map.put(part.getValues(), part); - cacheSize += parts.size(); - } else { - overflows.incr(); - } - } - - /** - * Will only return a value if all partitions for this table are in the cache. 
Otherwise you - * should call {@link #get} individually - * @param dbName - * @param tableName - * @return - */ - Collection getAllForTable(String dbName, String tableName) { - TrieValue entry = cache.get(new ObjectPair(dbName, tableName)); - if (entry != null && entry.hasAllPartitionsForTable) { - hits.incr(); - return entry.map.values(); - } else { - misses.incr(); - return null; - } - } - - Partition get(String dbName, String tableName, List partVals) { - TrieValue entry = cache.get(new ObjectPair(dbName, tableName)); - if (entry != null) { - hits.incr(); - return entry.map.get(partVals); - } else { - misses.incr(); - return null; - } - } - - void remove(String dbName, String tableName) { - ObjectPair key = new ObjectPair(dbName, tableName); - TrieValue entry = cache.get(key); - if (entry != null) { - cacheSize -= entry.map.size(); - cache.remove(key); - } - } - - void remove(String dbName, String tableName, List partVals) { - ObjectPair key = new ObjectPair(dbName, tableName); - TrieValue entry = cache.get(key); - if (entry != null && entry.map.remove(partVals) != null) { - cacheSize--; - entry.hasAllPartitionsForTable = false; - } - } - - void flush() { - cache.clear(); - cacheSize = 0; - } - - static class TrieValue { - boolean hasAllPartitionsForTable; - Map, Partition> map; - - TrieValue(boolean hasAll) { - hasAllPartitionsForTable = hasAll; - map = new HashMap, Partition>(); - } - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java deleted file mode 100644 index 2b0863d..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java +++ /dev/null @@ -1,292 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
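The point of the hasAllPartitionsForTable flag above is that getAllForTable may only answer when the complete partition list was cached, and removing any single partition must clear the flag so later callers fall back to per-partition lookups. A stripped-down illustration, with plain strings standing in for Partition objects and the (db, table) pair collapsed to one key:

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

public class AllForTableSketch {
  static final class Entry {
    boolean complete;                                        // true only if every partition of the table is present
    final Map<String, String> partitions = new HashMap<>();  // partition values -> partition (simplified)
  }

  private final Map<String, Entry> byTable = new HashMap<>();  // key: "db.table" (simplified)

  void putAll(String table, Map<String, String> parts) {
    Entry e = byTable.computeIfAbsent(table, t -> new Entry());
    e.partitions.putAll(parts);
    e.complete = true;
  }

  // Answers only when the complete set is cached; otherwise callers look up partitions one by one.
  Collection<String> getAllForTable(String table) {
    Entry e = byTable.get(table);
    return (e != null && e.complete) ? e.partitions.values() : null;
  }

  void removeOne(String table, String partValues) {
    Entry e = byTable.get(table);
    if (e != null && e.partitions.remove(partValues) != null) {
      e.complete = false;  // the cached set is no longer the full set
    }
  }
}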
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Properties; - -import org.apache.commons.lang.ArrayUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.filter.ByteArrayComparable; -import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; -import org.apache.hadoop.io.BytesWritable; - -import com.google.protobuf.InvalidProtocolBufferException; - -public class PartitionKeyComparator extends ByteArrayComparable { - private static final Logger LOG = LoggerFactory.getLogger(PartitionKeyComparator.class); - static class Mark { - Mark(String value, boolean inclusive) { - this.value = value; - this.inclusive = inclusive; - } - String value; - boolean inclusive; - public String toString() { - return value + (inclusive?"_":""); - } - } - static class Range { - Range(String keyName, Mark start, Mark end) { - this.keyName = keyName; - this.start = start; - this.end = end; - } - String keyName; - Mark start; - Mark end; - public String toString() { - return "" + keyName + ":" + (start!=null?start.toString():"") + (end!=null?end.toString():""); - } - } - // Cache the information derived from ranges for performance, including - // range in native datatype - static class NativeRange { - int pos; - Comparable start; - Comparable end; - } - static class Operator { - public Operator(Type type, String keyName, String val) { - this.type = type; - this.keyName = keyName; - this.val = val; - } - enum Type { - LIKE, NOTEQUALS - }; - Type type; - String keyName; - String val; - } - static class NativeOperator { - int pos; - Comparable val; - } - String names; - String types; - List ranges; - List nativeRanges; - List ops; - List nativeOps; - Properties serdeProps; - public PartitionKeyComparator(String names, String types, List ranges, List ops) { - super(null); - this.names = names; - this.types = types; - this.ranges = ranges; - this.ops = ops; - serdeProps = new Properties(); - serdeProps.setProperty(serdeConstants.LIST_COLUMNS, "dbName,tableName," + names); - serdeProps.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string," + types); - - this.nativeRanges = new ArrayList(this.ranges.size()); - for (int i=0;i(this.ops.size()); - for (int i=0;i ranges = new ArrayList(); - for (HbaseMetastoreProto.PartitionKeyComparator.Range range : proto.getRangeList()) { - Mark start = null; - if (range.hasStart()) { - start = new Mark(range.getStart().getValue(), range.getStart().getInclusive()); - } - Mark end = null; - if (range.hasEnd()) { - end = new Mark(range.getEnd().getValue(), range.getEnd().getInclusive()); - } - ranges.add(new Range(range.getKey(), start, end)); - } - List ops = new ArrayList(); - for 
(HbaseMetastoreProto.PartitionKeyComparator.Operator op : proto.getOpList()) { - ops.add(new Operator(Operator.Type.valueOf(op.getType().name()), op.getKey(), - op.getVal())); - } - return new PartitionKeyComparator(proto.getNames(), proto.getTypes(), ranges, ops); - } - - @Override - public byte[] toByteArray() { - HbaseMetastoreProto.PartitionKeyComparator.Builder builder = - HbaseMetastoreProto.PartitionKeyComparator.newBuilder(); - builder.setNames(names); - builder.setTypes(types); - for (int i=0;i=0 || - !range.start.inclusive && partVal.compareTo(nativeRange.start)>0) { - if (range.end == null || range.end.inclusive && partVal.compareTo(nativeRange.end)<=0 || - !range.end.inclusive && partVal.compareTo(nativeRange.end)<0) { - continue; - } - } - if (LOG.isDebugEnabled()) { - LOG.debug("Fail to match range " + range.keyName + "-" + partVal + "[" + nativeRange.start - + "," + nativeRange.end + "]"); - } - return 1; - } - - for (int i=0;i getCols() { - copyCols(); - return super.getCols(); - } - - @Override - public void setCols(List cols) { - colsCopied = true; - super.setCols(cols); - } - - @Override - public void unsetCols() { - colsCopied = true; - super.unsetCols(); - } - - @Override - public Iterator getColsIterator() { - copyCols(); - return super.getColsIterator(); - } - - private void copyCols() { - if (!colsCopied) { - colsCopied = true; - if (super.getCols() != null) { - List cols = new ArrayList(super.getColsSize()); - for (FieldSchema fs : super.getCols()) cols.add(new FieldSchema(fs)); - super.setCols(cols); - } - } - } - - @Override - public SerDeInfo getSerdeInfo() { - copySerde(); - return super.getSerdeInfo(); - } - - @Override - public void setSerdeInfo(SerDeInfo serdeInfo) { - serdeCopied = true; - super.setSerdeInfo(serdeInfo); - } - - @Override - public void unsetSerdeInfo() { - serdeCopied = true; - super.unsetSerdeInfo(); - } - - private void copySerde() { - if (!serdeCopied) { - serdeCopied = true; - if (super.getSerdeInfo() != null) super.setSerdeInfo(new SerDeInfo(super.getSerdeInfo())); - } - } - - @Override - public void addToBucketCols(String bucket) { - copyBucketCols(); - super.addToBucketCols(bucket); - } - - @Override - public List getBucketCols() { - copyBucketCols(); - return super.getBucketCols(); - } - - @Override - public void setBucketCols(List buckets) { - bucketsCopied = true; - super.setBucketCols(buckets); - } - - @Override - public void unsetBucketCols() { - bucketsCopied = true; - super.unsetBucketCols(); - } - - @Override - public Iterator getBucketColsIterator() { - copyBucketCols(); - return super.getBucketColsIterator(); - } - - private void copyBucketCols() { - if (!bucketsCopied) { - bucketsCopied = true; - if (super.getBucketCols() != null) { - List buckets = new ArrayList(super.getBucketColsSize()); - for (String bucket : super.getBucketCols()) buckets.add(bucket); - super.setBucketCols(buckets); - } - } - } - - @Override - public void addToSortCols(Order sort) { - copySort(); - super.addToSortCols(sort); - } - - @Override - public List getSortCols() { - copySort(); - return super.getSortCols(); - } - - @Override - public void setSortCols(List sorts) { - sortCopied = true; - super.setSortCols(sorts); - } - - @Override - public void unsetSortCols() { - sortCopied = true; - super.unsetSortCols(); - } - - @Override - public Iterator getSortColsIterator() { - copySort(); - return super.getSortColsIterator(); - } - - private void copySort() { - if (!sortCopied) { - sortCopied = true; - if (super.getSortCols() != null) { - List sortCols = 
new ArrayList(super.getSortColsSize()); - for (Order sortCol : super.getSortCols()) sortCols.add(new Order(sortCol)); - super.setSortCols(sortCols); - } - } - } - - @Override - public SkewedInfo getSkewedInfo() { - copySkewed(); - return super.getSkewedInfo(); - } - - @Override - public void setSkewedInfo(SkewedInfo skewedInfo) { - skewedCopied = true; - super.setSkewedInfo(skewedInfo); - } - - @Override - public void unsetSkewedInfo() { - skewedCopied = true; - super.unsetSkewedInfo(); - } - - private void copySkewed() { - if (!skewedCopied) { - skewedCopied = true; - if (super.getSkewedInfo() != null) super.setSkewedInfo(new SkewedInfo(super.getSkewedInfo())); - } - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java deleted file mode 100644 index 78a962a..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java +++ /dev/null @@ -1,325 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.protobuf.ByteString; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.HiveStatsUtils; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator; -import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory; - -import java.io.IOException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -/** - * A cache for stats. This is only intended for use by - * {@link org.apache.hadoop.hive.metastore.hbase.HBaseReadWrite} and should not be used outside - * that class. 
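The StatsCache constructor below wires up a Guava LoadingCache: maximumSize and expireAfterWrite bound the in-memory copy, and the CacheLoader falls through to HBase (and, on a miss there, re-aggregates from partition statistics). The Guava pattern on its own, reduced to a toy loader:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

public class LoadingCacheSketch {
  public static void main(String[] args) throws ExecutionException {
    LoadingCache<String, Integer> cache = CacheBuilder.newBuilder()
        .maximumSize(10_000)                       // cf. METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES
        .expireAfterWrite(60, TimeUnit.SECONDS)    // cf. METASTORE_HBASE_AGGR_STATS_MEMORY_TTL
        .build(new CacheLoader<String, Integer>() {
          @Override
          public Integer load(String key) {
            // In StatsCache this is where HBase is consulted and, failing that,
            // stats are re-aggregated; here we just compute something cheap.
            return key.length();
          }
        });
    System.out.println(cache.get("db1.tbl1.col1")); // loads on first access, served from memory afterwards
  }
}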
- */ -class StatsCache { - - private static final Logger LOG = LoggerFactory.getLogger(StatsCache.class.getName()); - private static StatsCache self = null; - - private LoadingCache cache; - private Invalidator invalidator; - private long runInvalidatorEvery; - private long maxTimeInCache; - private boolean invalidatorHasRun; - - @VisibleForTesting Counter misses; - @VisibleForTesting Counter hbaseHits; - @VisibleForTesting Counter totalGets; - - static synchronized StatsCache getInstance(Configuration conf) { - if (self == null) { - self = new StatsCache(conf); - } - return self; - } - - private StatsCache(final Configuration conf) { - final StatsCache me = this; - cache = CacheBuilder.newBuilder() - .maximumSize( - HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES)) - .expireAfterWrite(HiveConf.getTimeVar(conf, - HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_MEMORY_TTL, TimeUnit.SECONDS), TimeUnit.SECONDS) - .build(new CacheLoader() { - @Override - public AggrStats load(StatsCacheKey key) throws Exception { - boolean useDensityFunctionForNDVEstimation = HiveConf.getBoolVar(conf, - HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION); - double ndvTuner = HiveConf.getFloatVar(conf, - HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER); - HBaseReadWrite hrw = HBaseReadWrite.getInstance(); - AggrStats aggrStats = hrw.getAggregatedStats(key.hashed); - if (aggrStats == null) { - misses.incr(); - ColumnStatsAggregator aggregator = null; - aggrStats = new AggrStats(); - LOG.debug("Unable to find aggregated stats for " + key.colName + ", aggregating"); - List css = hrw.getPartitionStatistics(key.dbName, key.tableName, - key.partNames, HBaseStore.partNameListToValsList(key.partNames), - Collections.singletonList(key.colName)); - if (css != null && css.size() > 0) { - aggrStats.setPartsFound(css.size()); - if (aggregator == null) { - aggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator(css.iterator() - .next().getStatsObj().iterator().next().getStatsData().getSetField(), - useDensityFunctionForNDVEstimation, ndvTuner); - } - ColumnStatisticsObj statsObj = aggregator - .aggregate(key.colName, key.partNames, css); - aggrStats.addToColStats(statsObj); - me.put(key, aggrStats); - } - } else { - hbaseHits.incr(); - } - return aggrStats; - } - }); - misses = new Counter("Stats cache table misses"); - hbaseHits = new Counter("Stats cache table hits"); - totalGets = new Counter("Total get calls to the stats cache"); - - maxTimeInCache = HiveConf.getTimeVar(conf, - HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_HBASE_TTL, TimeUnit.SECONDS); - // We want runEvery in milliseconds, even though we give the default value in the conf in - // seconds. - runInvalidatorEvery = HiveConf.getTimeVar(conf, - HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY, TimeUnit.MILLISECONDS); - - invalidator = new Invalidator(); - invalidator.setDaemon(true); - invalidator.start(); - } - - /** - * Add an object to the cache. 
- * @param key Key for this entry - * @param aggrStats stats - * @throws java.io.IOException - */ - void put(StatsCacheKey key, AggrStats aggrStats) throws IOException { - HBaseReadWrite.getInstance().putAggregatedStats(key.hashed, key.dbName, key.tableName, - key.partNames, - key.colName, aggrStats); - cache.put(key, aggrStats); - } - - /** - * Get partition level statistics - * @param dbName name of database table is in - * @param tableName name of table - * @param partNames names of the partitions - * @param colName of column to get stats for - * @return stats object for this column, or null if none cached - * @throws java.io.IOException - */ - - AggrStats get(String dbName, String tableName, List partNames, String colName) - throws IOException { - totalGets.incr(); - StatsCacheKey key = new StatsCacheKey(dbName, tableName, partNames, colName); - try { - return cache.get(key); - } catch (ExecutionException e) { - throw new IOException(e); - } - } - - /** - * Remove all entries that are related to a particular set of partitions. This should be - * called when partitions are deleted or stats are updated. - * @param dbName name of database table is in - * @param tableName name of table - * @param partName name of the partition - * @throws IOException - */ - void invalidate(String dbName, String tableName, String partName) - throws IOException { - invalidator.addToQueue( - HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.newBuilder() - .setDbName(ByteString.copyFrom(dbName.getBytes(HBaseUtils.ENCODING))) - .setTableName(ByteString.copyFrom(tableName.getBytes(HBaseUtils.ENCODING))) - .setPartName(ByteString.copyFrom(partName.getBytes(HBaseUtils.ENCODING))) - .build()); - } - - void dumpCounters() { - LOG.debug(misses.dump()); - LOG.debug(hbaseHits.dump()); - LOG.debug(totalGets.dump()); - } - - /** - * Completely dump the cache from memory, used to test that we can access stats from HBase itself. - * @throws IOException - */ - @VisibleForTesting void flushMemory() throws IOException { - cache.invalidateAll(); - } - - @VisibleForTesting void resetCounters() { - misses.clear(); - hbaseHits.clear(); - totalGets.clear(); - } - - @VisibleForTesting void setRunInvalidatorEvery(long runEvery) { - runInvalidatorEvery = runEvery; - } - - @VisibleForTesting void setMaxTimeInCache(long maxTime) { - maxTimeInCache = maxTime; - } - - @VisibleForTesting void wakeInvalidator() throws InterruptedException { - invalidatorHasRun = false; - // Wait through 2 cycles so we're sure our entry won't be picked as too new. 
- Thread.sleep(2 * runInvalidatorEvery); - invalidator.interrupt(); - while (!invalidatorHasRun) { - Thread.sleep(10); - } - } - - static class StatsCacheKey { - final byte[] hashed; - String dbName; - String tableName; - List partNames; - String colName; - private MessageDigest md; - - StatsCacheKey(byte[] key) { - hashed = key; - } - - StatsCacheKey(String dbName, String tableName, List partNames, String colName) { - this.dbName = dbName; - this.tableName = tableName; - this.partNames = partNames; - this.colName = colName; - - try { - md = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } - md.update(dbName.getBytes(HBaseUtils.ENCODING)); - md.update(tableName.getBytes(HBaseUtils.ENCODING)); - Collections.sort(this.partNames); - for (String s : partNames) { - md.update(s.getBytes(HBaseUtils.ENCODING)); - } - md.update(colName.getBytes(HBaseUtils.ENCODING)); - hashed = md.digest(); - } - - @Override - public boolean equals(Object other) { - if (other == null || !(other instanceof StatsCacheKey)) return false; - StatsCacheKey that = (StatsCacheKey)other; - return Arrays.equals(hashed, that.hashed); - } - - @Override - public int hashCode() { - return Arrays.hashCode(hashed); - } - } - - private class Invalidator extends Thread { - private List entries = new ArrayList<>(); - private Lock lock = new ReentrantLock(); - - void addToQueue(HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry entry) { - lock.lock(); - try { - entries.add(entry); - } finally { - lock.unlock(); - } - } - - @Override - public void run() { - while (true) { - long startedAt = System.currentTimeMillis(); - List thisRun = null; - lock.lock(); - try { - if (entries.size() > 0) { - thisRun = entries; - entries = new ArrayList<>(); - } - } finally { - lock.unlock(); - } - - if (thisRun != null) { - try { - HbaseMetastoreProto.AggrStatsInvalidatorFilter filter = - HbaseMetastoreProto.AggrStatsInvalidatorFilter.newBuilder() - .setRunEvery(runInvalidatorEvery) - .setMaxCacheEntryLife(maxTimeInCache) - .addAllToInvalidate(thisRun) - .build(); - List keys = - HBaseReadWrite.getInstance().invalidateAggregatedStats(filter); - cache.invalidateAll(keys); - } catch (IOException e) { - // Not a lot I can do here - LOG.error("Caught error while invalidating entries in the cache", e); - } - } - invalidatorHasRun = true; - - try { - sleep(runInvalidatorEvery - (System.currentTimeMillis() - startedAt)); - } catch (InterruptedException e) { - LOG.warn("Interupted while sleeping", e); - } - } - } - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java deleted file mode 100644 index f66200f..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java +++ /dev/null @@ -1,127 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
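StatsCacheKey above hashes the database, table, sorted partition names, and column name into an MD5 digest and uses that digest both as the HBase row key and for equals/hashCode. The same construction in isolation, assuming UTF-8 for the encoding (class name mine):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class DigestKeySketch {
  // Stable key for (db, table, partNames, column); sorting a copy of the partition
  // names makes the digest independent of the order in which they are supplied.
  static byte[] digestKey(String dbName, String tableName, List<String> partNames, String colName) {
    try {
      MessageDigest md = MessageDigest.getInstance("MD5");
      md.update(dbName.getBytes(StandardCharsets.UTF_8));
      md.update(tableName.getBytes(StandardCharsets.UTF_8));
      List<String> sorted = new ArrayList<>(partNames);
      Collections.sort(sorted);
      for (String p : sorted) {
        md.update(p.getBytes(StandardCharsets.UTF_8));
      }
      md.update(colName.getBytes(StandardCharsets.UTF_8));
      return md.digest();
    } catch (NoSuchAlgorithmException e) {
      throw new RuntimeException(e);
    }
  }

  public static void main(String[] args) {
    byte[] a = digestKey("db1", "t1", Arrays.asList("ds=2017-01-02", "ds=2017-01-01"), "c1");
    byte[] b = digestKey("db1", "t1", Arrays.asList("ds=2017-01-01", "ds=2017-01-02"), "c1");
    System.out.println(Arrays.equals(a, b)); // true: order of partition names does not matter
  }
}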
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import co.cask.tephra.TransactionAware; -import co.cask.tephra.TransactionContext; -import co.cask.tephra.TransactionFailureException; -import co.cask.tephra.TransactionManager; -import co.cask.tephra.TransactionSystemClient; -import co.cask.tephra.distributed.ThreadLocalClientProvider; -import co.cask.tephra.distributed.TransactionServiceClient; -import co.cask.tephra.hbase10.TransactionAwareHTable; -import co.cask.tephra.hbase10.coprocessor.TransactionProcessor; -import co.cask.tephra.inmemory.InMemoryTxSystemClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.twill.discovery.InMemoryDiscoveryService; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * A class that uses Tephra for transaction management. - */ -public class TephraHBaseConnection extends VanillaHBaseConnection { - static final private Logger LOG = LoggerFactory.getLogger(TephraHBaseConnection.class.getName()); - - private Map txnTables; - private TransactionContext txn; - private TransactionSystemClient txnClient; - - TephraHBaseConnection() { - super(); - txnTables = new HashMap(); - } - - @Override - public void connect() throws IOException { - super.connect(); - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST)) { - LOG.debug("Using an in memory client transaction system for testing"); - TransactionManager txnMgr = new TransactionManager(conf); - txnMgr.startAndWait(); - txnClient = new InMemoryTxSystemClient(txnMgr); - } else { - // TODO should enable use of ZKDiscoveryService if users want it - LOG.debug("Using real client transaction system for production"); - txnClient = new TransactionServiceClient(conf, - new ThreadLocalClientProvider(conf, new InMemoryDiscoveryService())); - } - for (String tableName : HBaseReadWrite.tableNames) { - txnTables.put(tableName, new TransactionAwareHTable(super.getHBaseTable(tableName, true))); - } - txn = new TransactionContext(txnClient, txnTables.values()); - } - - @Override - public void beginTransaction() throws IOException { - try { - txn.start(); - LOG.debug("Started txn in tephra"); - } catch (TransactionFailureException e) { - throw new IOException(e); - } - } - - @Override - public void commitTransaction() throws IOException { - try { - txn.finish(); - LOG.debug("Finished txn in tephra"); - } catch (TransactionFailureException e) { - throw new IOException(e); - } - } - - @Override - public void rollbackTransaction() throws IOException { - try { - txn.abort(); - LOG.debug("Aborted txn in tephra"); - } catch (TransactionFailureException e) { - throw new IOException(e); - } - } - - @Override - public void flush(HTableInterface htab) throws IOException { - // NO-OP as we want to flush at commit time - } - - @Override - protected HTableDescriptor buildDescriptor(String tableName, List columnFamilies) - throws IOException { - HTableDescriptor 
tableDesc = super.buildDescriptor(tableName, columnFamilies); - tableDesc.addCoprocessor(TransactionProcessor.class.getName()); - return tableDesc; - } - - @Override - public HTableInterface getHBaseTable(String tableName, boolean force) throws IOException { - // Ignore force, it will mess up our previous creation of the tables. - return (TransactionAwareHTable)txnTables.get(tableName); - } - -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/VanillaHBaseConnection.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/VanillaHBaseConnection.java deleted file mode 100644 index e631580..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/VanillaHBaseConnection.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hbase.client.Result; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * A pass through to a simple HBase connection. This has no transactions. 
- */ -public class VanillaHBaseConnection implements HBaseConnection { - static final private Logger LOG = LoggerFactory.getLogger(VanillaHBaseConnection.class.getName()); - - protected HConnection conn; - protected Map tables; - protected Configuration conf; - - VanillaHBaseConnection() { - tables = new HashMap(); - } - - @Override - public void connect() throws IOException { - if (conf == null) throw new RuntimeException("Must call getConf before connect"); - conn = HConnectionManager.createConnection(conf); - } - - @Override - public void close() throws IOException { - for (HTableInterface htab : tables.values()) htab.close(); - } - - @Override - public void beginTransaction() throws IOException { - - } - - @Override - public void commitTransaction() throws IOException { - - } - - @Override - public void rollbackTransaction() throws IOException { - - } - - @Override - public void flush(HTableInterface htab) throws IOException { - htab.flushCommits(); - } - - @Override - public void createHBaseTable(String tableName, List columnFamilies) - throws IOException { - HBaseAdmin admin = new HBaseAdmin(conn); - LOG.info("Creating HBase table " + tableName); - admin.createTable(buildDescriptor(tableName, columnFamilies)); - admin.close(); - } - - protected HTableDescriptor buildDescriptor(String tableName, List columnFamilies) - throws IOException { - HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName)); - for (byte[] cf : columnFamilies) { - tableDesc.addFamily(new HColumnDescriptor(cf)); - } - return tableDesc; - } - - @Override - public HTableInterface getHBaseTable(String tableName) throws IOException { - return getHBaseTable(tableName, false); - } - - @Override - public HTableInterface getHBaseTable(String tableName, boolean force) throws IOException { - HTableInterface htab = tables.get(tableName); - if (htab == null) { - LOG.debug("Trying to connect to table " + tableName); - try { - htab = conn.getTable(tableName); - // Calling gettable doesn't actually connect to the region server, it's very light - // weight, so call something else so we actually reach out and touch the region server - // and see if the table is there. - if (force) htab.get(new Get("nosuchkey".getBytes(HBaseUtils.ENCODING))); - } catch (IOException e) { - LOG.info("Caught exception when table was missing"); - return null; - } - htab.setAutoFlushTo(false); - tables.put(tableName, htab); - } - return htab; - } - - @Override - public void setConf(Configuration conf) { - this.conf = conf; - } - - @Override - public Configuration getConf() { - return conf; - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/BinaryColumnStatsAggregator.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/BinaryColumnStatsAggregator.java deleted file mode 100644 index e6c836b..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/BinaryColumnStatsAggregator.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hive.metastore.columnstats.aggr; - -import java.util.List; - -import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.MetaException; - -public class BinaryColumnStatsAggregator extends ColumnStatsAggregator { - - @Override - public ColumnStatisticsObj aggregate(String colName, List partNames, - List css) throws MetaException { - ColumnStatisticsObj statsObj = null; - BinaryColumnStatsData aggregateData = null; - String colType = null; - for (ColumnStatistics cs : css) { - if (cs.getStatsObjSize() != 1) { - throw new MetaException( - "The number of columns should be exactly one in aggrStats, but found " - + cs.getStatsObjSize()); - } - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - if (statsObj == null) { - colType = cso.getColType(); - statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso - .getStatsData().getSetField()); - } - BinaryColumnStatsData newData = cso.getStatsData().getBinaryStats(); - if (aggregateData == null) { - aggregateData = newData.deepCopy(); - } else { - aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen())); - aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen())); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - } - } - ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData(); - columnStatisticsData.setBinaryStats(aggregateData); - statsObj.setStatsData(columnStatisticsData); - return statsObj; - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/BooleanColumnStatsAggregator.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/BooleanColumnStatsAggregator.java deleted file mode 100644 index a34bc9f..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/BooleanColumnStatsAggregator.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
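The aggregators in this package all follow the same fold: take the first partition's stats as the seed (deepCopy), then merge each subsequent partition in. For binary columns the merge is max of the maximum length, max of the average length (an upper bound rather than a true weighted average), and a sum of null counts. The merge step in isolation, with a simplified holder class:

public class BinaryStatsMergeSketch {
  // Simplified holder for the three binary-column statistics being merged.
  static final class BinaryStats {
    long maxColLen;
    double avgColLen;
    long numNulls;
    BinaryStats(long maxColLen, double avgColLen, long numNulls) {
      this.maxColLen = maxColLen;
      this.avgColLen = avgColLen;
      this.numNulls = numNulls;
    }
  }

  // Same rules as BinaryColumnStatsAggregator: max, max (conservative), sum.
  static BinaryStats merge(BinaryStats acc, BinaryStats next) {
    acc.maxColLen = Math.max(acc.maxColLen, next.maxColLen);
    acc.avgColLen = Math.max(acc.avgColLen, next.avgColLen);
    acc.numNulls = acc.numNulls + next.numNulls;
    return acc;
  }

  public static void main(String[] args) {
    BinaryStats acc = new BinaryStats(100, 40.0, 2);
    acc = merge(acc, new BinaryStats(80, 55.5, 3));
    System.out.println(acc.maxColLen + " " + acc.avgColLen + " " + acc.numNulls); // 100 55.5 5
  }
}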
- */ - -package org.apache.hadoop.hive.metastore.columnstats.aggr; - -import java.util.List; - -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.MetaException; - -public class BooleanColumnStatsAggregator extends ColumnStatsAggregator { - - @Override - public ColumnStatisticsObj aggregate(String colName, List partNames, - List css) throws MetaException { - ColumnStatisticsObj statsObj = null; - BooleanColumnStatsData aggregateData = null; - String colType = null; - for (ColumnStatistics cs : css) { - if (cs.getStatsObjSize() != 1) { - throw new MetaException( - "The number of columns should be exactly one in aggrStats, but found " - + cs.getStatsObjSize()); - } - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - if (statsObj == null) { - colType = cso.getColType(); - statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso - .getStatsData().getSetField()); - } - BooleanColumnStatsData newData = cso.getStatsData().getBooleanStats(); - if (aggregateData == null) { - aggregateData = newData.deepCopy(); - } else { - aggregateData.setNumTrues(aggregateData.getNumTrues() + newData.getNumTrues()); - aggregateData.setNumFalses(aggregateData.getNumFalses() + newData.getNumFalses()); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - } - } - ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData(); - columnStatisticsData.setBooleanStats(aggregateData); - statsObj.setStatsData(columnStatisticsData); - return statsObj; - } - -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregator.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregator.java deleted file mode 100644 index a52e5e5..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregator.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.hadoop.hive.metastore.columnstats.aggr; - -import java.util.List; - -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.MetaException; - -public abstract class ColumnStatsAggregator { - public boolean useDensityFunctionForNDVEstimation; - public double ndvTuner; - public abstract ColumnStatisticsObj aggregate(String colName, List partNames, - List css) throws MetaException; -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java deleted file mode 100644 index 173e06f..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hive.metastore.columnstats.aggr; - -import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.DateColumnStatsData; -import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; - -public class ColumnStatsAggregatorFactory { - - private ColumnStatsAggregatorFactory() { - } - - public static ColumnStatsAggregator getColumnStatsAggregator(_Fields type, - boolean useDensityFunctionForNDVEstimation, double ndvTuner) { - ColumnStatsAggregator agg; - switch (type) { - case BOOLEAN_STATS: - agg = new BooleanColumnStatsAggregator(); - break; - case LONG_STATS: - agg = new LongColumnStatsAggregator(); - break; - case DATE_STATS: - agg = new DateColumnStatsAggregator(); - break; - case DOUBLE_STATS: - agg = new DoubleColumnStatsAggregator(); - break; - case STRING_STATS: - agg = new StringColumnStatsAggregator(); - break; - case BINARY_STATS: - agg = new BinaryColumnStatsAggregator(); - break; - case DECIMAL_STATS: - agg = new DecimalColumnStatsAggregator(); - break; - default: - throw new RuntimeException("Woh, bad. 
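The factory removed above picks a concrete aggregator from the union field set on the first stats object and then copies the two tuning knobs (useDensityFunctionForNDVEstimation, ndvTuner) into it. A rough equivalent of that dispatch, sketched with hypothetical names and an EnumMap in place of the switch:

    import java.util.EnumMap;
    import java.util.Map;
    import java.util.function.Supplier;

    enum StatsType { BOOLEAN, LONG, DATE, DOUBLE, STRING, BINARY, DECIMAL }

    abstract class Aggregator {
      boolean useDensityFunctionForNDVEstimation;
      double ndvTuner;
    }

    class BooleanAggregator extends Aggregator { }
    class LongAggregator extends Aggregator { }

    class AggregatorRegistry {
      private static final Map<StatsType, Supplier<Aggregator>> REGISTRY =
          new EnumMap<>(StatsType.class);
      static {
        REGISTRY.put(StatsType.BOOLEAN, BooleanAggregator::new);
        REGISTRY.put(StatsType.LONG, LongAggregator::new);
        // remaining types would be registered the same way
      }

      static Aggregator create(StatsType type, boolean useDensity, double ndvTuner) {
        Supplier<Aggregator> supplier = REGISTRY.get(type);
        if (supplier == null) {
          throw new IllegalArgumentException("Unknown stats type " + type);
        }
        Aggregator agg = supplier.get();
        agg.useDensityFunctionForNDVEstimation = useDensity;
        agg.ndvTuner = ndvTuner;
        return agg;
      }
    }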
Unknown stats type " + type.toString()); - } - agg.useDensityFunctionForNDVEstimation = useDensityFunctionForNDVEstimation; - agg.ndvTuner = ndvTuner; - return agg; - } - - public static ColumnStatisticsObj newColumnStaticsObj(String colName, String colType, _Fields type) { - ColumnStatisticsObj cso = new ColumnStatisticsObj(); - ColumnStatisticsData csd = new ColumnStatisticsData(); - cso.setColName(colName); - cso.setColType(colType); - switch (type) { - case BOOLEAN_STATS: - csd.setBooleanStats(new BooleanColumnStatsData()); - break; - - case LONG_STATS: - csd.setLongStats(new LongColumnStatsData()); - break; - - case DATE_STATS: - csd.setDateStats(new DateColumnStatsData()); - break; - - case DOUBLE_STATS: - csd.setDoubleStats(new DoubleColumnStatsData()); - break; - - case STRING_STATS: - csd.setStringStats(new StringColumnStatsData()); - break; - - case BINARY_STATS: - csd.setBinaryStats(new BinaryColumnStatsData()); - break; - - case DECIMAL_STATS: - csd.setDecimalStats(new DecimalColumnStatsData()); - break; - - default: - throw new RuntimeException("Woh, bad. Unknown stats type!"); - } - - cso.setStatsData(csd); - return cso; - } - -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/DecimalColumnStatsAggregator.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/DecimalColumnStatsAggregator.java deleted file mode 100644 index 5924c3e..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/DecimalColumnStatsAggregator.java +++ /dev/null @@ -1,371 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.hadoop.hive.metastore.columnstats.aggr; - -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; -import org.apache.hadoop.hive.metastore.StatObjectConverter; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.hbase.HBaseUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DecimalColumnStatsAggregator extends ColumnStatsAggregator implements - IExtrapolatePartStatus { - - private static final Logger LOG = LoggerFactory.getLogger(DecimalColumnStatsAggregator.class); - - @Override - public ColumnStatisticsObj aggregate(String colName, List partNames, - List css) throws MetaException { - ColumnStatisticsObj statsObj = null; - - // check if all the ColumnStatisticsObjs contain stats and all the ndv are - // bitvectors - boolean doAllPartitionContainStats = partNames.size() == css.size(); - LOG.debug("doAllPartitionContainStats for " + colName + " is " + doAllPartitionContainStats); - NumDistinctValueEstimator ndvEstimator = null; - String colType = null; - for (ColumnStatistics cs : css) { - if (cs.getStatsObjSize() != 1) { - throw new MetaException( - "The number of columns should be exactly one in aggrStats, but found " - + cs.getStatsObjSize()); - } - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - if (statsObj == null) { - colType = cso.getColType(); - statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso - .getStatsData().getSetField()); - } - if (!cso.getStatsData().getDecimalStats().isSetBitVectors() - || cso.getStatsData().getDecimalStats().getBitVectors().length() == 0) { - ndvEstimator = null; - break; - } else { - // check if all of the bit vectors can merge - NumDistinctValueEstimator estimator = NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(cso.getStatsData().getDecimalStats().getBitVectors()); - if (ndvEstimator == null) { - ndvEstimator = estimator; - } else { - if (ndvEstimator.canMerge(estimator)) { - continue; - } else { - ndvEstimator = null; - break; - } - } - } - } - if (ndvEstimator != null) { - ndvEstimator = NumDistinctValueEstimatorFactory - .getEmptyNumDistinctValueEstimator(ndvEstimator); - } - LOG.debug("all of the bit vectors can merge for " + colName + " is " + (ndvEstimator != null)); - ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData(); - if (doAllPartitionContainStats || css.size() < 2) { - DecimalColumnStatsData aggregateData = null; - long lowerBound = 0; - long higherBound = 0; - double densityAvgSum = 0.0; - for (ColumnStatistics cs : css) { - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - DecimalColumnStatsData newData = cso.getStatsData().getDecimalStats(); - lowerBound = Math.max(lowerBound, newData.getNumDVs()); - higherBound += newData.getNumDVs(); - densityAvgSum += (HBaseUtils.getDoubleValue(newData.getHighValue()) - HBaseUtils - .getDoubleValue(newData.getLowValue())) / newData.getNumDVs(); - if (ndvEstimator != null) { - 
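The first loop in the decimal aggregator decides, in a single pass, whether NDV can be computed by merging bit vectors: the estimator survives only if every stats-bearing partition carries a non-empty bit vector and each one is mergeable with the first; otherwise the code falls back to bound-based estimation. A sketch of just that decision, using a hypothetical NdvSketch interface rather than Hive's NumDistinctValueEstimator:

    import java.util.List;

    interface NdvSketch {
      boolean canMerge(NdvSketch other);   // same sketch family and parameters
      void merge(NdvSketch other);
      long estimate();
    }

    class SketchChoice {
      /** Returns a reference sketch if every partition has one and they are all
       *  mutually mergeable; returns null to signal the fallback path. */
      static NdvSketch mergeableOrNull(List<NdvSketch> perPartition) {
        NdvSketch first = null;
        for (NdvSketch s : perPartition) {
          if (s == null) {
            return null;                   // a partition has no (or an empty) bit vector
          }
          if (first == null) {
            first = s;
          } else if (!first.canMerge(s)) {
            return null;                   // incompatible sketches cannot be combined
          }
        }
        return first;                      // caller then merges everything into an empty copy
      }
    }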
ndvEstimator.mergeEstimators(NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(newData.getBitVectors())); - } - if (aggregateData == null) { - aggregateData = newData.deepCopy(); - } else { - if (HBaseUtils.getDoubleValue(aggregateData.getLowValue()) < HBaseUtils - .getDoubleValue(newData.getLowValue())) { - aggregateData.setLowValue(aggregateData.getLowValue()); - } else { - aggregateData.setLowValue(newData.getLowValue()); - } - if (HBaseUtils.getDoubleValue(aggregateData.getHighValue()) > HBaseUtils - .getDoubleValue(newData.getHighValue())) { - aggregateData.setHighValue(aggregateData.getHighValue()); - } else { - aggregateData.setHighValue(newData.getHighValue()); - } - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); - } - } - if (ndvEstimator != null) { - // if all the ColumnStatisticsObjs contain bitvectors, we do not need to - // use uniform distribution assumption because we can merge bitvectors - // to get a good estimation. - aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); - } else { - long estimation; - if (useDensityFunctionForNDVEstimation) { - // We have estimation, lowerbound and higherbound. We use estimation - // if it is between lowerbound and higherbound. - double densityAvg = densityAvgSum / partNames.size(); - estimation = (long) ((HBaseUtils.getDoubleValue(aggregateData.getHighValue()) - HBaseUtils - .getDoubleValue(aggregateData.getLowValue())) / densityAvg); - if (estimation < lowerBound) { - estimation = lowerBound; - } else if (estimation > higherBound) { - estimation = higherBound; - } - } else { - estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner); - } - aggregateData.setNumDVs(estimation); - } - columnStatisticsData.setDecimalStats(aggregateData); - } else { - // we need extrapolation - LOG.debug("start extrapolation for " + colName); - Map indexMap = new HashMap(); - for (int index = 0; index < partNames.size(); index++) { - indexMap.put(partNames.get(index), index); - } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); - // while we scan the css, we also get the densityAvg, lowerbound and - // higerbound when useDensityFunctionForNDVEstimation is true. - double densityAvgSum = 0.0; - if (ndvEstimator == null) { - // if not every partition uses bitvector for ndv, we just fall back to - // the traditional extrapolation methods. - for (ColumnStatistics cs : css) { - String partName = cs.getStatsDesc().getPartName(); - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - DecimalColumnStatsData newData = cso.getStatsData().getDecimalStats(); - if (useDensityFunctionForNDVEstimation) { - densityAvgSum += (HBaseUtils.getDoubleValue(newData.getHighValue()) - HBaseUtils - .getDoubleValue(newData.getLowValue())) / newData.getNumDVs(); - } - adjustedIndexMap.put(partName, (double) indexMap.get(partName)); - adjustedStatsMap.put(partName, cso.getStatsData()); - } - } else { - // we first merge all the adjacent bitvectors that we could merge and - // derive new partition names and index. 
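When the bit vectors cannot be merged, the code above brackets the aggregate NDV between the largest per-partition NDV (the column cannot have fewer distinct values than its biggest partition) and the sum of per-partition NDVs (the case where no value repeats across partitions). The density heuristic divides the aggregated value range by the average per-partition density and clamps the result into that bracket; ndvTuner instead picks a fixed point inside the bracket. A self-contained sketch of the arithmetic, with hypothetical parameter names:

    class NdvEstimate {
      /**
       * aggLow/aggHigh: low and high value of the aggregated column range.
       * partLows/partHighs/partNdvs: per-partition stats, all arrays the same length.
       */
      static long estimate(double aggLow, double aggHigh,
                           double[] partLows, double[] partHighs, long[] partNdvs,
                           boolean useDensity, double ndvTuner) {
        long lowerBound = 0;
        long higherBound = 0;
        double densityAvgSum = 0.0;
        for (int i = 0; i < partNdvs.length; i++) {
          lowerBound = Math.max(lowerBound, partNdvs[i]);
          higherBound += partNdvs[i];
          densityAvgSum += (partHighs[i] - partLows[i]) / partNdvs[i];
        }
        long estimation;
        if (useDensity) {
          double densityAvg = densityAvgSum / partNdvs.length;
          estimation = (long) ((aggHigh - aggLow) / densityAvg);
          if (estimation < lowerBound) {
            estimation = lowerBound;
          } else if (estimation > higherBound) {
            estimation = higherBound;
          }
        } else {
          // ndvTuner in [0, 1]: 0 keeps the lower bound, 1 the upper bound
          estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
        }
        return estimation;
      }
    }

For example, two partitions covering [0, 1000] and [500, 1500] with NDVs 100 and 120 give bounds [120, 220] and an average density of about 9.2, so the density estimate (1500 - 0) / 9.2 ≈ 163 falls inside the bracket and is used as-is.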
- StringBuilder pseudoPartName = new StringBuilder(); - double pseudoIndexSum = 0; - int length = 0; - int curIndex = -1; - DecimalColumnStatsData aggregateData = null; - for (ColumnStatistics cs : css) { - String partName = cs.getStatsDesc().getPartName(); - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - DecimalColumnStatsData newData = cso.getStatsData().getDecimalStats(); - // newData.isSetBitVectors() should be true for sure because we - // already checked it before. - if (indexMap.get(partName) != curIndex) { - // There is bitvector, but it is not adjacent to the previous ones. - if (length > 0) { - // we have to set ndv - adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); - aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); - ColumnStatisticsData csd = new ColumnStatisticsData(); - csd.setDecimalStats(aggregateData); - adjustedStatsMap.put(pseudoPartName.toString(), csd); - if (useDensityFunctionForNDVEstimation) { - densityAvgSum += (HBaseUtils.getDoubleValue(aggregateData.getHighValue()) - HBaseUtils - .getDoubleValue(aggregateData.getLowValue())) / aggregateData.getNumDVs(); - } - // reset everything - pseudoPartName = new StringBuilder(); - pseudoIndexSum = 0; - length = 0; - ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator); - } - aggregateData = null; - } - curIndex = indexMap.get(partName); - pseudoPartName.append(partName); - pseudoIndexSum += curIndex; - length++; - curIndex++; - if (aggregateData == null) { - aggregateData = newData.deepCopy(); - } else { - if (HBaseUtils.getDoubleValue(aggregateData.getLowValue()) < HBaseUtils - .getDoubleValue(newData.getLowValue())) { - aggregateData.setLowValue(aggregateData.getLowValue()); - } else { - aggregateData.setLowValue(newData.getLowValue()); - } - if (HBaseUtils.getDoubleValue(aggregateData.getHighValue()) > HBaseUtils - .getDoubleValue(newData.getHighValue())) { - aggregateData.setHighValue(aggregateData.getHighValue()); - } else { - aggregateData.setHighValue(newData.getHighValue()); - } - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - } - ndvEstimator.mergeEstimators(NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(newData.getBitVectors())); - } - if (length > 0) { - // we have to set ndv - adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); - aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); - ColumnStatisticsData csd = new ColumnStatisticsData(); - csd.setDecimalStats(aggregateData); - adjustedStatsMap.put(pseudoPartName.toString(), csd); - if (useDensityFunctionForNDVEstimation) { - densityAvgSum += (HBaseUtils.getDoubleValue(aggregateData.getHighValue()) - HBaseUtils - .getDoubleValue(aggregateData.getLowValue())) / aggregateData.getNumDVs(); - } - } - } - extrapolate(columnStatisticsData, partNames.size(), css.size(), adjustedIndexMap, - adjustedStatsMap, densityAvgSum / adjustedStatsMap.size()); - } - statsObj.setStatsData(columnStatisticsData); - LOG.debug("Ndv estimatation for " + colName + " is " - + columnStatisticsData.getDecimalStats().getNumDVs()); - return statsObj; - } - - @Override - public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, - int numPartsWithStats, Map adjustedIndexMap, - Map adjustedStatsMap, double densityAvg) { - int rightBorderInd = numParts; - DecimalColumnStatsData extrapolateDecimalData = new DecimalColumnStatsData(); - Map extractedAdjustedStatsMap = new HashMap<>(); - for 
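In the extrapolation path, stats-bearing partitions whose positions are consecutive are folded into a single "pseudo partition" whose position is the average of its members, and extrapolation then works over those pseudo positions. The grouping logic on its own, sketched with hypothetical names (the stats payload and the bit-vector merging are omitted):

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    class AdjacentRunGrouper {
      /**
       * partNames: partitions that actually have stats, in ascending position order.
       * indexOf:   position of each partition within the full partition list.
       * Returns concatenated run name -> average position of the run.
       */
      static Map<String, Double> group(List<String> partNames, Map<String, Integer> indexOf) {
        Map<String, Double> adjusted = new LinkedHashMap<>();
        StringBuilder runName = new StringBuilder();
        double indexSum = 0;
        int length = 0;
        int expected = -1;
        for (String part : partNames) {
          int idx = indexOf.get(part);
          if (idx != expected && length > 0) {      // the run is broken: flush it
            adjusted.put(runName.toString(), indexSum / length);
            runName = new StringBuilder();
            indexSum = 0;
            length = 0;
          }
          runName.append(part);
          indexSum += idx;
          length++;
          expected = idx + 1;                       // next index that would extend the run
        }
        if (length > 0) {
          adjusted.put(runName.toString(), indexSum / length);
        }
        return adjusted;
      }
    }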
(Map.Entry entry : adjustedStatsMap.entrySet()) { - extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getDecimalStats()); - } - List> list = new LinkedList>( - extractedAdjustedStatsMap.entrySet()); - // get the lowValue - Collections.sort(list, new Comparator>() { - public int compare(Map.Entry o1, - Map.Entry o2) { - return o1.getValue().getLowValue().compareTo(o2.getValue().getLowValue()); - } - }); - double minInd = adjustedIndexMap.get(list.get(0).getKey()); - double maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); - double lowValue = 0; - double min = HBaseUtils.getDoubleValue(list.get(0).getValue().getLowValue()); - double max = HBaseUtils.getDoubleValue(list.get(list.size() - 1).getValue().getLowValue()); - if (minInd == maxInd) { - lowValue = min; - } else if (minInd < maxInd) { - // left border is the min - lowValue = (max - (max - min) * maxInd / (maxInd - minInd)); - } else { - // right border is the min - lowValue = (max - (max - min) * (rightBorderInd - maxInd) / (minInd - maxInd)); - } - - // get the highValue - Collections.sort(list, new Comparator>() { - public int compare(Map.Entry o1, - Map.Entry o2) { - return o1.getValue().getHighValue().compareTo(o2.getValue().getHighValue()); - } - }); - minInd = adjustedIndexMap.get(list.get(0).getKey()); - maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); - double highValue = 0; - min = HBaseUtils.getDoubleValue(list.get(0).getValue().getHighValue()); - max = HBaseUtils.getDoubleValue(list.get(list.size() - 1).getValue().getHighValue()); - if (minInd == maxInd) { - highValue = min; - } else if (minInd < maxInd) { - // right border is the max - highValue = (min + (max - min) * (rightBorderInd - minInd) / (maxInd - minInd)); - } else { - // left border is the max - highValue = (min + (max - min) * minInd / (minInd - maxInd)); - } - - // get the #nulls - long numNulls = 0; - for (Map.Entry entry : extractedAdjustedStatsMap.entrySet()) { - numNulls += entry.getValue().getNumNulls(); - } - // we scale up sumNulls based on the number of partitions - numNulls = numNulls * numParts / numPartsWithStats; - - // get the ndv - long ndv = 0; - long ndvMin = 0; - long ndvMax = 0; - Collections.sort(list, new Comparator>() { - public int compare(Map.Entry o1, - Map.Entry o2) { - return o1.getValue().getNumDVs() < o2.getValue().getNumDVs() ? 
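extrapolate() treats partition position as an axis: for the low value it sorts the stats-bearing (or pseudo) partitions by their low value, takes the smallest and largest observed lows together with their positions, and linearly extends that trend to whichever border of the full partition range the minimum should sit on. A sketch of just that interpolation (the high value is handled as a mirror image):

    class LowValueExtrapolation {
      /**
       * min/max:       smallest and largest observed per-partition low values.
       * minInd/maxInd: positions of the partitions holding those two values.
       * numParts:      total number of partitions, i.e. the right border of the axis.
       */
      static double extrapolateLow(double min, double max,
                                   double minInd, double maxInd, int numParts) {
        int rightBorder = numParts;
        if (minInd == maxInd) {
          return min;                                       // no trend to extend
        }
        if (minInd < maxInd) {
          // lows grow to the right, so the global minimum sits at the left border (position 0)
          return max - (max - min) * maxInd / (maxInd - minInd);
        }
        // lows grow to the left, so the global minimum sits at the right border
        return max - (max - min) * (rightBorder - maxInd) / (minInd - maxInd);
      }
    }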
-1 : 1; - } - }); - long lowerBound = list.get(list.size() - 1).getValue().getNumDVs(); - long higherBound = 0; - for (Map.Entry entry : list) { - higherBound += entry.getValue().getNumDVs(); - } - if (useDensityFunctionForNDVEstimation && densityAvg != 0.0) { - ndv = (long) ((highValue - lowValue) / densityAvg); - if (ndv < lowerBound) { - ndv = lowerBound; - } else if (ndv > higherBound) { - ndv = higherBound; - } - } else { - minInd = adjustedIndexMap.get(list.get(0).getKey()); - maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); - ndvMin = list.get(0).getValue().getNumDVs(); - ndvMax = list.get(list.size() - 1).getValue().getNumDVs(); - if (minInd == maxInd) { - ndv = ndvMin; - } else if (minInd < maxInd) { - // right border is the max - ndv = (long) (ndvMin + (ndvMax - ndvMin) * (rightBorderInd - minInd) / (maxInd - minInd)); - } else { - // left border is the max - ndv = (long) (ndvMin + (ndvMax - ndvMin) * minInd / (minInd - maxInd)); - } - } - extrapolateDecimalData.setLowValue(StatObjectConverter.createThriftDecimal(String - .valueOf(lowValue))); - extrapolateDecimalData.setHighValue(StatObjectConverter.createThriftDecimal(String - .valueOf(highValue))); - extrapolateDecimalData.setNumNulls(numNulls); - extrapolateDecimalData.setNumDVs(ndv); - extrapolateData.setDecimalStats(extrapolateDecimalData); - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/DoubleColumnStatsAggregator.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/DoubleColumnStatsAggregator.java deleted file mode 100644 index e55c412..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/DoubleColumnStatsAggregator.java +++ /dev/null @@ -1,345 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.hadoop.hive.metastore.columnstats.aggr; - -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DoubleColumnStatsAggregator extends ColumnStatsAggregator implements - IExtrapolatePartStatus { - - private static final Logger LOG = LoggerFactory.getLogger(LongColumnStatsAggregator.class); - - @Override - public ColumnStatisticsObj aggregate(String colName, List partNames, - List css) throws MetaException { - ColumnStatisticsObj statsObj = null; - - // check if all the ColumnStatisticsObjs contain stats and all the ndv are - // bitvectors - boolean doAllPartitionContainStats = partNames.size() == css.size(); - LOG.debug("doAllPartitionContainStats for " + colName + " is " + doAllPartitionContainStats); - NumDistinctValueEstimator ndvEstimator = null; - String colType = null; - for (ColumnStatistics cs : css) { - if (cs.getStatsObjSize() != 1) { - throw new MetaException( - "The number of columns should be exactly one in aggrStats, but found " - + cs.getStatsObjSize()); - } - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - if (statsObj == null) { - colType = cso.getColType(); - statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso - .getStatsData().getSetField()); - } - if (!cso.getStatsData().getDoubleStats().isSetBitVectors() - || cso.getStatsData().getDoubleStats().getBitVectors().length() == 0) { - ndvEstimator = null; - break; - } else { - // check if all of the bit vectors can merge - NumDistinctValueEstimator estimator = NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(cso.getStatsData().getDoubleStats().getBitVectors()); - if (ndvEstimator == null) { - ndvEstimator = estimator; - } else { - if (ndvEstimator.canMerge(estimator)) { - continue; - } else { - ndvEstimator = null; - break; - } - } - } - } - if (ndvEstimator != null) { - ndvEstimator = NumDistinctValueEstimatorFactory - .getEmptyNumDistinctValueEstimator(ndvEstimator); - } - LOG.debug("all of the bit vectors can merge for " + colName + " is " + (ndvEstimator != null)); - ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData(); - if (doAllPartitionContainStats || css.size() < 2) { - DoubleColumnStatsData aggregateData = null; - long lowerBound = 0; - long higherBound = 0; - double densityAvgSum = 0.0; - for (ColumnStatistics cs : css) { - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - DoubleColumnStatsData newData = cso.getStatsData().getDoubleStats(); - lowerBound = Math.max(lowerBound, newData.getNumDVs()); - higherBound += newData.getNumDVs(); - densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs(); - if (ndvEstimator != null) { - ndvEstimator.mergeEstimators(NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(newData.getBitVectors())); - } - if (aggregateData == null) { - aggregateData = newData.deepCopy(); - } else { - 
aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); - aggregateData - .setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue())); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); - } - } - if (ndvEstimator != null) { - // if all the ColumnStatisticsObjs contain bitvectors, we do not need to - // use uniform distribution assumption because we can merge bitvectors - // to get a good estimation. - aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); - } else { - long estimation; - if (useDensityFunctionForNDVEstimation) { - // We have estimation, lowerbound and higherbound. We use estimation - // if it is between lowerbound and higherbound. - double densityAvg = densityAvgSum / partNames.size(); - estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg); - if (estimation < lowerBound) { - estimation = lowerBound; - } else if (estimation > higherBound) { - estimation = higherBound; - } - } else { - estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner); - } - aggregateData.setNumDVs(estimation); - } - columnStatisticsData.setDoubleStats(aggregateData); - } else { - // we need extrapolation - LOG.debug("start extrapolation for " + colName); - Map indexMap = new HashMap(); - for (int index = 0; index < partNames.size(); index++) { - indexMap.put(partNames.get(index), index); - } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); - // while we scan the css, we also get the densityAvg, lowerbound and - // higerbound when useDensityFunctionForNDVEstimation is true. - double densityAvgSum = 0.0; - if (ndvEstimator == null) { - // if not every partition uses bitvector for ndv, we just fall back to - // the traditional extrapolation methods. - for (ColumnStatistics cs : css) { - String partName = cs.getStatsDesc().getPartName(); - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - DoubleColumnStatsData newData = cso.getStatsData().getDoubleStats(); - if (useDensityFunctionForNDVEstimation) { - densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs(); - } - adjustedIndexMap.put(partName, (double) indexMap.get(partName)); - adjustedStatsMap.put(partName, cso.getStatsData()); - } - } else { - // we first merge all the adjacent bitvectors that we could merge and - // derive new partition names and index. - StringBuilder pseudoPartName = new StringBuilder(); - double pseudoIndexSum = 0; - int length = 0; - int curIndex = -1; - DoubleColumnStatsData aggregateData = null; - for (ColumnStatistics cs : css) { - String partName = cs.getStatsDesc().getPartName(); - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - DoubleColumnStatsData newData = cso.getStatsData().getDoubleStats(); - // newData.isSetBitVectors() should be true for sure because we - // already checked it before. - if (indexMap.get(partName) != curIndex) { - // There is bitvector, but it is not adjacent to the previous ones. 
- if (length > 0) { - // we have to set ndv - adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); - aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); - ColumnStatisticsData csd = new ColumnStatisticsData(); - csd.setDoubleStats(aggregateData); - adjustedStatsMap.put(pseudoPartName.toString(), csd); - if (useDensityFunctionForNDVEstimation) { - densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs(); - } - // reset everything - pseudoPartName = new StringBuilder(); - pseudoIndexSum = 0; - length = 0; - ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator); - } - aggregateData = null; - } - curIndex = indexMap.get(partName); - pseudoPartName.append(partName); - pseudoIndexSum += curIndex; - length++; - curIndex++; - if (aggregateData == null) { - aggregateData = newData.deepCopy(); - } else { - aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); - aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), - newData.getHighValue())); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - } - ndvEstimator.mergeEstimators(NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(newData.getBitVectors())); - } - if (length > 0) { - // we have to set ndv - adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); - aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); - ColumnStatisticsData csd = new ColumnStatisticsData(); - csd.setDoubleStats(aggregateData); - adjustedStatsMap.put(pseudoPartName.toString(), csd); - if (useDensityFunctionForNDVEstimation) { - densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs(); - } - } - } - extrapolate(columnStatisticsData, partNames.size(), css.size(), adjustedIndexMap, - adjustedStatsMap, densityAvgSum / adjustedStatsMap.size()); - } - LOG.debug("Ndv estimatation for " + colName + " is " - + columnStatisticsData.getDoubleStats().getNumDVs()); - statsObj.setStatsData(columnStatisticsData); - return statsObj; - } - - @Override - public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, - int numPartsWithStats, Map adjustedIndexMap, - Map adjustedStatsMap, double densityAvg) { - int rightBorderInd = numParts; - DoubleColumnStatsData extrapolateDoubleData = new DoubleColumnStatsData(); - Map extractedAdjustedStatsMap = new HashMap<>(); - for (Map.Entry entry : adjustedStatsMap.entrySet()) { - extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getDoubleStats()); - } - List> list = new LinkedList>( - extractedAdjustedStatsMap.entrySet()); - // get the lowValue - Collections.sort(list, new Comparator>() { - public int compare(Map.Entry o1, - Map.Entry o2) { - return o1.getValue().getLowValue() < o2.getValue().getLowValue() ? 
-1 : 1; - } - }); - double minInd = adjustedIndexMap.get(list.get(0).getKey()); - double maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); - double lowValue = 0; - double min = list.get(0).getValue().getLowValue(); - double max = list.get(list.size() - 1).getValue().getLowValue(); - if (minInd == maxInd) { - lowValue = min; - } else if (minInd < maxInd) { - // left border is the min - lowValue = (max - (max - min) * maxInd / (maxInd - minInd)); - } else { - // right border is the min - lowValue = (max - (max - min) * (rightBorderInd - maxInd) / (minInd - maxInd)); - } - - // get the highValue - Collections.sort(list, new Comparator>() { - public int compare(Map.Entry o1, - Map.Entry o2) { - return o1.getValue().getHighValue() < o2.getValue().getHighValue() ? -1 : 1; - } - }); - minInd = adjustedIndexMap.get(list.get(0).getKey()); - maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); - double highValue = 0; - min = list.get(0).getValue().getHighValue(); - max = list.get(list.size() - 1).getValue().getHighValue(); - if (minInd == maxInd) { - highValue = min; - } else if (minInd < maxInd) { - // right border is the max - highValue = (min + (max - min) * (rightBorderInd - minInd) / (maxInd - minInd)); - } else { - // left border is the max - highValue = (min + (max - min) * minInd / (minInd - maxInd)); - } - - // get the #nulls - long numNulls = 0; - for (Map.Entry entry : extractedAdjustedStatsMap.entrySet()) { - numNulls += entry.getValue().getNumNulls(); - } - // we scale up sumNulls based on the number of partitions - numNulls = numNulls * numParts / numPartsWithStats; - - // get the ndv - long ndv = 0; - long ndvMin = 0; - long ndvMax = 0; - Collections.sort(list, new Comparator>() { - public int compare(Map.Entry o1, - Map.Entry o2) { - return o1.getValue().getNumDVs() < o2.getValue().getNumDVs() ? 
-1 : 1; - } - }); - long lowerBound = list.get(list.size() - 1).getValue().getNumDVs(); - long higherBound = 0; - for (Map.Entry entry : list) { - higherBound += entry.getValue().getNumDVs(); - } - if (useDensityFunctionForNDVEstimation && densityAvg != 0.0) { - ndv = (long) ((highValue - lowValue) / densityAvg); - if (ndv < lowerBound) { - ndv = lowerBound; - } else if (ndv > higherBound) { - ndv = higherBound; - } - } else { - minInd = adjustedIndexMap.get(list.get(0).getKey()); - maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); - ndvMin = list.get(0).getValue().getNumDVs(); - ndvMax = list.get(list.size() - 1).getValue().getNumDVs(); - if (minInd == maxInd) { - ndv = ndvMin; - } else if (minInd < maxInd) { - // right border is the max - ndv = (long) (ndvMin + (ndvMax - ndvMin) * (rightBorderInd - minInd) / (maxInd - minInd)); - } else { - // left border is the max - ndv = (long) (ndvMin + (ndvMax - ndvMin) * minInd / (minInd - maxInd)); - } - } - extrapolateDoubleData.setLowValue(lowValue); - extrapolateDoubleData.setHighValue(highValue); - extrapolateDoubleData.setNumNulls(numNulls); - extrapolateDoubleData.setNumDVs(ndv); - extrapolateData.setDoubleStats(extrapolateDoubleData); - } - -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/IExtrapolatePartStatus.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/IExtrapolatePartStatus.java deleted file mode 100644 index acf679e..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/IExtrapolatePartStatus.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.metastore.columnstats.aggr; - -import java.util.Map; - -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; - -public interface IExtrapolatePartStatus { - // The following function will extrapolate the stats when the column stats of - // some partitions are missing. - /** - * @param extrapolateData - * it will carry back the specific stats, e.g., DOUBLE_STATS or - * LONG_STATS - * @param numParts - * the total number of partitions - * @param numPartsWithStats - * the number of partitions that have stats - * @param adjustedIndexMap - * the partition name to index map - * @param adjustedStatsMap - * the partition name to its stats map - * @param densityAvg - * the average of ndv density, which is useful when - * useDensityFunctionForNDVEstimation is true. 
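The remaining two pieces of extrapolate() are simpler: null counts observed on the stats-bearing partitions are scaled up in proportion to the total number of partitions, and NDV is either taken from the density heuristic (clamped to the usual bounds) or linearly interpolated between the smallest and largest per-partition NDV, exactly like the low/high values. Sketched with hypothetical parameter names:

    class NdvAndNullsExtrapolation {
      static long scaleUpNulls(long observedNulls, int numParts, int numPartsWithStats) {
        // assume the partitions without stats behave like the observed ones
        return observedNulls * numParts / numPartsWithStats;
      }

      static long extrapolateNdv(long ndvMin, long ndvMax, double minInd, double maxInd,
                                 int numParts, long lowerBound, long higherBound,
                                 boolean useDensity, double densityAvg,
                                 double lowValue, double highValue) {
        if (useDensity && densityAvg != 0.0) {
          long ndv = (long) ((highValue - lowValue) / densityAvg);
          return Math.min(Math.max(ndv, lowerBound), higherBound);   // clamp into the bounds
        }
        int rightBorder = numParts;
        if (minInd == maxInd) {
          return ndvMin;
        }
        if (minInd < maxInd) {
          // NDV grows to the right, so extend the trend to the right border
          return (long) (ndvMin + (ndvMax - ndvMin) * (rightBorder - minInd) / (maxInd - minInd));
        }
        // NDV grows to the left, so extend the trend to the left border
        return (long) (ndvMin + (ndvMax - ndvMin) * minInd / (minInd - maxInd));
      }
    }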
- */ - public abstract void extrapolate(ColumnStatisticsData extrapolateData, int numParts, - int numPartsWithStats, Map adjustedIndexMap, - Map adjustedStatsMap, double densityAvg); - -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/LongColumnStatsAggregator.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/LongColumnStatsAggregator.java deleted file mode 100644 index 2ee09f3..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/LongColumnStatsAggregator.java +++ /dev/null @@ -1,344 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hive.metastore.columnstats.aggr; - -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class LongColumnStatsAggregator extends ColumnStatsAggregator implements - IExtrapolatePartStatus { - - private static final Logger LOG = LoggerFactory.getLogger(LongColumnStatsAggregator.class); - - @Override - public ColumnStatisticsObj aggregate(String colName, List partNames, - List css) throws MetaException { - ColumnStatisticsObj statsObj = null; - - // check if all the ColumnStatisticsObjs contain stats and all the ndv are - // bitvectors - boolean doAllPartitionContainStats = partNames.size() == css.size(); - LOG.debug("doAllPartitionContainStats for " + colName + " is " + doAllPartitionContainStats); - NumDistinctValueEstimator ndvEstimator = null; - String colType = null; - for (ColumnStatistics cs : css) { - if (cs.getStatsObjSize() != 1) { - throw new MetaException( - "The number of columns should be exactly one in aggrStats, but found " - + cs.getStatsObjSize()); - } - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - if (statsObj == null) { - colType = cso.getColType(); - statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso - .getStatsData().getSetField()); - } - if (!cso.getStatsData().getLongStats().isSetBitVectors() - || cso.getStatsData().getLongStats().getBitVectors().length() == 0) { - ndvEstimator = null; - break; - } else { - // check if all of the bit vectors can merge - NumDistinctValueEstimator estimator = 
NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(cso.getStatsData().getLongStats().getBitVectors()); - if (ndvEstimator == null) { - ndvEstimator = estimator; - } else { - if (ndvEstimator.canMerge(estimator)) { - continue; - } else { - ndvEstimator = null; - break; - } - } - } - } - if (ndvEstimator != null) { - ndvEstimator = NumDistinctValueEstimatorFactory - .getEmptyNumDistinctValueEstimator(ndvEstimator); - } - LOG.debug("all of the bit vectors can merge for " + colName + " is " + (ndvEstimator != null)); - ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData(); - if (doAllPartitionContainStats || css.size() < 2) { - LongColumnStatsData aggregateData = null; - long lowerBound = 0; - long higherBound = 0; - double densityAvgSum = 0.0; - for (ColumnStatistics cs : css) { - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - LongColumnStatsData newData = cso.getStatsData().getLongStats(); - lowerBound = Math.max(lowerBound, newData.getNumDVs()); - higherBound += newData.getNumDVs(); - densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs(); - if (ndvEstimator != null) { - ndvEstimator.mergeEstimators(NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(newData.getBitVectors())); - } - if (aggregateData == null) { - aggregateData = newData.deepCopy(); - } else { - aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); - aggregateData - .setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue())); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); - } - } - if (ndvEstimator != null) { - // if all the ColumnStatisticsObjs contain bitvectors, we do not need to - // use uniform distribution assumption because we can merge bitvectors - // to get a good estimation. - aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); - } else { - long estimation; - if (useDensityFunctionForNDVEstimation) { - // We have estimation, lowerbound and higherbound. We use estimation - // if it is between lowerbound and higherbound. - double densityAvg = densityAvgSum / partNames.size(); - estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg); - if (estimation < lowerBound) { - estimation = lowerBound; - } else if (estimation > higherBound) { - estimation = higherBound; - } - } else { - estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner); - } - aggregateData.setNumDVs(estimation); - } - columnStatisticsData.setLongStats(aggregateData); - } else { - // we need extrapolation - LOG.debug("start extrapolation for " + colName); - - Map indexMap = new HashMap(); - for (int index = 0; index < partNames.size(); index++) { - indexMap.put(partNames.get(index), index); - } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); - // while we scan the css, we also get the densityAvg, lowerbound and - // higerbound when useDensityFunctionForNDVEstimation is true. - double densityAvgSum = 0.0; - if (ndvEstimator == null) { - // if not every partition uses bitvector for ndv, we just fall back to - // the traditional extrapolation methods. 
- for (ColumnStatistics cs : css) { - String partName = cs.getStatsDesc().getPartName(); - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - LongColumnStatsData newData = cso.getStatsData().getLongStats(); - if (useDensityFunctionForNDVEstimation) { - densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs(); - } - adjustedIndexMap.put(partName, (double) indexMap.get(partName)); - adjustedStatsMap.put(partName, cso.getStatsData()); - } - } else { - // we first merge all the adjacent bitvectors that we could merge and - // derive new partition names and index. - StringBuilder pseudoPartName = new StringBuilder(); - double pseudoIndexSum = 0; - int length = 0; - int curIndex = -1; - LongColumnStatsData aggregateData = null; - for (ColumnStatistics cs : css) { - String partName = cs.getStatsDesc().getPartName(); - ColumnStatisticsObj cso = cs.getStatsObjIterator().next(); - LongColumnStatsData newData = cso.getStatsData().getLongStats(); - // newData.isSetBitVectors() should be true for sure because we - // already checked it before. - if (indexMap.get(partName) != curIndex) { - // There is bitvector, but it is not adjacent to the previous ones. - if (length > 0) { - // we have to set ndv - adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); - aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); - ColumnStatisticsData csd = new ColumnStatisticsData(); - csd.setLongStats(aggregateData); - adjustedStatsMap.put(pseudoPartName.toString(), csd); - if (useDensityFunctionForNDVEstimation) { - densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs(); - } - // reset everything - pseudoPartName = new StringBuilder(); - pseudoIndexSum = 0; - length = 0; - ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator); - } - aggregateData = null; - } - curIndex = indexMap.get(partName); - pseudoPartName.append(partName); - pseudoIndexSum += curIndex; - length++; - curIndex++; - if (aggregateData == null) { - aggregateData = newData.deepCopy(); - } else { - aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); - aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), - newData.getHighValue())); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - } - ndvEstimator.mergeEstimators(NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(newData.getBitVectors())); - } - if (length > 0) { - // we have to set ndv - adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length); - aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues()); - ColumnStatisticsData csd = new ColumnStatisticsData(); - csd.setLongStats(aggregateData); - adjustedStatsMap.put(pseudoPartName.toString(), csd); - if (useDensityFunctionForNDVEstimation) { - densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs(); - } - } - } - extrapolate(columnStatisticsData, partNames.size(), css.size(), adjustedIndexMap, - adjustedStatsMap, densityAvgSum / adjustedStatsMap.size()); - } - statsObj.setStatsData(columnStatisticsData); - LOG.debug("Ndv estimatation for " + colName + " is " - + columnStatisticsData.getLongStats().getNumDVs()); - return statsObj; - } - - @Override - public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, - int numPartsWithStats, Map adjustedIndexMap, - Map adjustedStatsMap, double densityAvg) { - int 
rightBorderInd = numParts; - LongColumnStatsData extrapolateLongData = new LongColumnStatsData(); - Map extractedAdjustedStatsMap = new HashMap<>(); - for (Map.Entry entry : adjustedStatsMap.entrySet()) { - extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getLongStats()); - } - List> list = new LinkedList>( - extractedAdjustedStatsMap.entrySet()); - // get the lowValue - Collections.sort(list, new Comparator>() { - public int compare(Map.Entry o1, - Map.Entry o2) { - return o1.getValue().getLowValue() < o2.getValue().getLowValue() ? -1 : 1; - } - }); - double minInd = adjustedIndexMap.get(list.get(0).getKey()); - double maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); - long lowValue = 0; - long min = list.get(0).getValue().getLowValue(); - long max = list.get(list.size() - 1).getValue().getLowValue(); - if (minInd == maxInd) { - lowValue = min; - } else if (minInd < maxInd) { - // left border is the min - lowValue = (long) (max - (max - min) * maxInd / (maxInd - minInd)); - } else { - // right border is the min - lowValue = (long) (max - (max - min) * (rightBorderInd - maxInd) / (minInd - maxInd)); - } - - // get the highValue - Collections.sort(list, new Comparator>() { - public int compare(Map.Entry o1, - Map.Entry o2) { - return o1.getValue().getHighValue() < o2.getValue().getHighValue() ? -1 : 1; - } - }); - minInd = adjustedIndexMap.get(list.get(0).getKey()); - maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); - long highValue = 0; - min = list.get(0).getValue().getHighValue(); - max = list.get(list.size() - 1).getValue().getHighValue(); - if (minInd == maxInd) { - highValue = min; - } else if (minInd < maxInd) { - // right border is the max - highValue = (long) (min + (max - min) * (rightBorderInd - minInd) / (maxInd - minInd)); - } else { - // left border is the max - highValue = (long) (min + (max - min) * minInd / (minInd - maxInd)); - } - - // get the #nulls - long numNulls = 0; - for (Map.Entry entry : extractedAdjustedStatsMap.entrySet()) { - numNulls += entry.getValue().getNumNulls(); - } - // we scale up sumNulls based on the number of partitions - numNulls = numNulls * numParts / numPartsWithStats; - - // get the ndv - long ndv = 0; - Collections.sort(list, new Comparator>() { - public int compare(Map.Entry o1, - Map.Entry o2) { - return o1.getValue().getNumDVs() < o2.getValue().getNumDVs() ? 
-1 : 1; - } - }); - long lowerBound = list.get(list.size() - 1).getValue().getNumDVs(); - long higherBound = 0; - for (Map.Entry entry : list) { - higherBound += entry.getValue().getNumDVs(); - } - if (useDensityFunctionForNDVEstimation && densityAvg != 0.0) { - ndv = (long) ((highValue - lowValue) / densityAvg); - if (ndv < lowerBound) { - ndv = lowerBound; - } else if (ndv > higherBound) { - ndv = higherBound; - } - } else { - minInd = adjustedIndexMap.get(list.get(0).getKey()); - maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey()); - min = list.get(0).getValue().getNumDVs(); - max = list.get(list.size() - 1).getValue().getNumDVs(); - if (minInd == maxInd) { - ndv = min; - } else if (minInd < maxInd) { - // right border is the max - ndv = (long) (min + (max - min) * (rightBorderInd - minInd) / (maxInd - minInd)); - } else { - // left border is the max - ndv = (long) (min + (max - min) * minInd / (minInd - maxInd)); - } - } - extrapolateLongData.setLowValue(lowValue); - extrapolateLongData.setHighValue(highValue); - extrapolateLongData.setNumNulls(numNulls); - extrapolateLongData.setNumDVs(ndv); - extrapolateData.setLongStats(extrapolateLongData); - } - -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/BinaryColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/BinaryColumnStatsMerger.java deleted file mode 100644 index 4c2d1bc..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/BinaryColumnStatsMerger.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
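To make the behaviour concrete, here is a small hypothetical walk-through with made-up numbers rather than Hive objects: a table with 4 partitions where only positions 0 and 3 report long stats (lows 10 and 40, highs 100 and 400, NDVs 50 and 80, nulls 5 and 7).

    public class ExtrapolationExample {
      public static void main(String[] args) {
        int numParts = 4;               // total partitions
        int numPartsWithStats = 2;      // only positions 0 and 3 have stats
        double minInd = 0, maxInd = 3;  // positions of the two stats-bearing partitions
        int rightBorder = numParts;

        // lows 10 (pos 0) and 40 (pos 3): trend grows to the right, minimum at the left border
        long lowValue = (long) (40 - (40 - 10) * maxInd / (maxInd - minInd));                     // 10
        // highs 100 (pos 0) and 400 (pos 3): trend grows to the right, maximum at the right border
        long highValue = (long) (100 + (400 - 100) * (rightBorder - minInd) / (maxInd - minInd)); // 500
        // nulls 5 + 7, scaled from 2 observed partitions to 4
        long numNulls = (5 + 7) * numParts / numPartsWithStats;                                   // 24
        // NDVs 50 and 80, interpolated toward the right border
        long ndv = (long) (50 + (80 - 50) * (rightBorder - minInd) / (maxInd - minInd));          // 90

        System.out.println(lowValue + " " + highValue + " " + numNulls + " " + ndv);
      }
    }

Note that the extrapolated high value (500) exceeds the largest observed high (400) because the upward trend is assumed to continue into the partition that has no stats.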
- */ - -package org.apache.hadoop.hive.metastore.columnstats.merge; - -import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; - -public class BinaryColumnStatsMerger extends ColumnStatsMerger { - - @Override - public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { - BinaryColumnStatsData aggregateData = aggregateColStats.getStatsData().getBinaryStats(); - BinaryColumnStatsData newData = newColStats.getStatsData().getBinaryStats(); - aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen())); - aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen())); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/BooleanColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/BooleanColumnStatsMerger.java deleted file mode 100644 index 8e50153..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/BooleanColumnStatsMerger.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hive.metastore.columnstats.merge; - -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; - -public class BooleanColumnStatsMerger extends ColumnStatsMerger { - - @Override - public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { - BooleanColumnStatsData aggregateData = aggregateColStats.getStatsData().getBooleanStats(); - BooleanColumnStatsData newData = newColStats.getStatsData().getBooleanStats(); - aggregateData.setNumTrues(aggregateData.getNumTrues() + newData.getNumTrues()); - aggregateData.setNumFalses(aggregateData.getNumFalses() + newData.getNumFalses()); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/ColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/ColumnStatsMerger.java deleted file mode 100644 index 474d4dd..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/ColumnStatsMerger.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
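The per-type mergers are the in-place counterpart of the aggregators: each folds one freshly computed stats object into the running aggregate for the same column; for boolean columns that is just a sum of the three counters. A trivial sketch with a hypothetical BooleanStats holder:

    class BooleanStats {
      long numTrues;
      long numFalses;
      long numNulls;
    }

    class BooleanStatsMerge {
      static void merge(BooleanStats aggregate, BooleanStats incoming) {
        aggregate.numTrues += incoming.numTrues;
        aggregate.numFalses += incoming.numFalses;
        aggregate.numNulls += incoming.numNulls;
      }
    }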
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hive.metastore.columnstats.merge; - -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public abstract class ColumnStatsMerger { - protected final Logger LOG = LoggerFactory.getLogger(ColumnStatsMerger.class.getName()); - - public abstract void merge(ColumnStatisticsObj aggregateColStats, - ColumnStatisticsObj newColStats); -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/ColumnStatsMergerFactory.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/ColumnStatsMergerFactory.java deleted file mode 100644 index 0ce1847..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/ColumnStatsMergerFactory.java +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.hadoop.hive.metastore.columnstats.merge; - -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; -import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; -import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.DateColumnStatsData; -import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; - -public class ColumnStatsMergerFactory { - - private ColumnStatsMergerFactory() { - } - - public static ColumnStatsMerger getColumnStatsMerger(ColumnStatisticsObj statsObjNew, - ColumnStatisticsObj statsObjOld) { - ColumnStatsMerger agg; - _Fields typeNew = statsObjNew.getStatsData().getSetField(); - _Fields typeOld = statsObjOld.getStatsData().getSetField(); - // make sure that they have the same type - typeNew = typeNew == typeOld ? typeNew : null; - switch (typeNew) { - case BOOLEAN_STATS: - agg = new BooleanColumnStatsMerger(); - break; - case LONG_STATS: { - agg = new LongColumnStatsMerger(); - break; - } - case DOUBLE_STATS: { - agg = new DoubleColumnStatsMerger(); - break; - } - case STRING_STATS: { - agg = new StringColumnStatsMerger(); - break; - } - case BINARY_STATS: - agg = new BinaryColumnStatsMerger(); - break; - case DECIMAL_STATS: { - agg = new DecimalColumnStatsMerger(); - break; - } - case DATE_STATS: { - agg = new DateColumnStatsMerger(); - break; - } - default: - throw new IllegalArgumentException("Unknown stats type " + typeNew.toString()); - } - return agg; - } - - public static ColumnStatisticsObj newColumnStaticsObj(String colName, String colType, _Fields type) { - ColumnStatisticsObj cso = new ColumnStatisticsObj(); - ColumnStatisticsData csd = new ColumnStatisticsData(); - cso.setColName(colName); - cso.setColType(colType); - switch (type) { - case BOOLEAN_STATS: - csd.setBooleanStats(new BooleanColumnStatsData()); - break; - - case LONG_STATS: - csd.setLongStats(new LongColumnStatsData()); - break; - - case DOUBLE_STATS: - csd.setDoubleStats(new DoubleColumnStatsData()); - break; - - case STRING_STATS: - csd.setStringStats(new StringColumnStatsData()); - break; - - case BINARY_STATS: - csd.setBinaryStats(new BinaryColumnStatsData()); - break; - - case DECIMAL_STATS: - csd.setDecimalStats(new DecimalColumnStatsData()); - break; - - case DATE_STATS: - csd.setDateStats(new DateColumnStatsData()); - break; - - default: - throw new IllegalArgumentException("Unknown stats type"); - } - - cso.setStatsData(csd); - return cso; - } - -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/DateColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/DateColumnStatsMerger.java deleted file mode 100644 index 2542a00..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/DateColumnStatsMerger.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hive.metastore.columnstats.merge; - -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.Date; -import org.apache.hadoop.hive.metastore.api.DateColumnStatsData; - -public class DateColumnStatsMerger extends ColumnStatsMerger { - @Override - public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { - DateColumnStatsData aggregateData = aggregateColStats.getStatsData().getDateStats(); - DateColumnStatsData newData = newColStats.getStatsData().getDateStats(); - Date lowValue = aggregateData.getLowValue().compareTo(newData.getLowValue()) < 0 ? aggregateData - .getLowValue() : newData.getLowValue(); - aggregateData.setLowValue(lowValue); - Date highValue = aggregateData.getHighValue().compareTo(newData.getHighValue()) >= 0 ? aggregateData - .getHighValue() : newData.getHighValue(); - aggregateData.setHighValue(highValue); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - if (!aggregateData.isSetBitVectors() || aggregateData.getBitVectors().length() == 0 - || !newData.isSetBitVectors() || newData.getBitVectors().length() == 0) { - aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); - } else { - NumDistinctValueEstimator oldEst = NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(aggregateData.getBitVectors()); - NumDistinctValueEstimator newEst = NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(newData.getBitVectors()); - long ndv = -1; - if (oldEst.canMerge(newEst)) { - oldEst.mergeEstimators(newEst); - ndv = oldEst.estimateNumDistinctValues(); - aggregateData.setBitVectors(oldEst.serialize()); - } else { - ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs()); - } - LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of " - + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv); - aggregateData.setNumDVs(ndv); - } - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/DecimalColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/DecimalColumnStatsMerger.java deleted file mode 100644 index 4e8e129..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/DecimalColumnStatsMerger.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hive.metastore.columnstats.merge; - -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.Decimal; -import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; - -public class DecimalColumnStatsMerger extends ColumnStatsMerger { - @Override - public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { - DecimalColumnStatsData aggregateData = aggregateColStats.getStatsData().getDecimalStats(); - DecimalColumnStatsData newData = newColStats.getStatsData().getDecimalStats(); - Decimal lowValue = aggregateData.getLowValue() != null - && (aggregateData.getLowValue().compareTo(newData.getLowValue()) > 0) ? aggregateData - .getLowValue() : newData.getLowValue(); - aggregateData.setLowValue(lowValue); - Decimal highValue = aggregateData.getHighValue() != null - && (aggregateData.getHighValue().compareTo(newData.getHighValue()) > 0) ? aggregateData - .getHighValue() : newData.getHighValue(); - aggregateData.setHighValue(highValue); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - if (!aggregateData.isSetBitVectors() || aggregateData.getBitVectors().length() == 0 - || !newData.isSetBitVectors() || newData.getBitVectors().length() == 0) { - aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); - } else { - NumDistinctValueEstimator oldEst = NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(aggregateData.getBitVectors()); - NumDistinctValueEstimator newEst = NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(newData.getBitVectors()); - long ndv = -1; - if (oldEst.canMerge(newEst)) { - oldEst.mergeEstimators(newEst); - ndv = oldEst.estimateNumDistinctValues(); - aggregateData.setBitVectors(oldEst.serialize()); - } else { - ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs()); - } - LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of " - + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv); - aggregateData.setNumDVs(ndv); - } - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/DoubleColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/DoubleColumnStatsMerger.java deleted file mode 100644 index 4ef5c39..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/DoubleColumnStatsMerger.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hive.metastore.columnstats.merge; - -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; - -public class DoubleColumnStatsMerger extends ColumnStatsMerger { - @Override - public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { - DoubleColumnStatsData aggregateData = aggregateColStats.getStatsData().getDoubleStats(); - DoubleColumnStatsData newData = newColStats.getStatsData().getDoubleStats(); - aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); - aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue())); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - if (!aggregateData.isSetBitVectors() || aggregateData.getBitVectors().length() == 0 - || !newData.isSetBitVectors() || newData.getBitVectors().length() == 0) { - aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); - } else { - NumDistinctValueEstimator oldEst = NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(aggregateData.getBitVectors()); - NumDistinctValueEstimator newEst = NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(newData.getBitVectors()); - long ndv = -1; - if (oldEst.canMerge(newEst)) { - oldEst.mergeEstimators(newEst); - ndv = oldEst.estimateNumDistinctValues(); - aggregateData.setBitVectors(oldEst.serialize()); - } else { - ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs()); - } - LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of " - + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv); - aggregateData.setNumDVs(ndv); - } - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/LongColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/LongColumnStatsMerger.java deleted file mode 100644 index acf7f03..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/LongColumnStatsMerger.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hive.metastore.columnstats.merge; - -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; - -public class LongColumnStatsMerger extends ColumnStatsMerger { - @Override - public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { - LongColumnStatsData aggregateData = aggregateColStats.getStatsData().getLongStats(); - LongColumnStatsData newData = newColStats.getStatsData().getLongStats(); - aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); - aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue())); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - if (!aggregateData.isSetBitVectors() || aggregateData.getBitVectors().length() == 0 - || !newData.isSetBitVectors() || newData.getBitVectors().length() == 0) { - aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); - } else { - NumDistinctValueEstimator oldEst = NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(aggregateData.getBitVectors()); - NumDistinctValueEstimator newEst = NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(newData.getBitVectors()); - long ndv = -1; - if (oldEst.canMerge(newEst)) { - oldEst.mergeEstimators(newEst); - ndv = oldEst.estimateNumDistinctValues(); - aggregateData.setBitVectors(oldEst.serialize()); - } else { - ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs()); - } - LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of " - + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv); - aggregateData.setNumDVs(ndv); - } - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/StringColumnStatsMerger.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/StringColumnStatsMerger.java deleted file mode 100644 index b3cd33c..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/merge/StringColumnStatsMerger.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hive.metastore.columnstats.merge; - -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; -import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; - -public class StringColumnStatsMerger extends ColumnStatsMerger { - @Override - public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { - StringColumnStatsData aggregateData = aggregateColStats.getStatsData().getStringStats(); - StringColumnStatsData newData = newColStats.getStatsData().getStringStats(); - aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen())); - aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen())); - aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); - if (!aggregateData.isSetBitVectors() || aggregateData.getBitVectors().length() == 0 - || !newData.isSetBitVectors() || newData.getBitVectors().length() == 0) { - aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); - } else { - NumDistinctValueEstimator oldEst = NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(aggregateData.getBitVectors()); - NumDistinctValueEstimator newEst = NumDistinctValueEstimatorFactory - .getNumDistinctValueEstimator(newData.getBitVectors()); - long ndv = -1; - if (oldEst.canMerge(newEst)) { - oldEst.mergeEstimators(newEst); - ndv = oldEst.estimateNumDistinctValues(); - aggregateData.setBitVectors(oldEst.serialize()); - } else { - ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs()); - } - LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of " - + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv); - aggregateData.setNumDVs(ndv); - } - } -} diff --git metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto deleted file mode 100644 index 53c381b..0000000 --- metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto +++ /dev/null @@ -1,368 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -enum PrincipalType { - USER = 0; - ROLE = 1; -} - -message AggrStats { - required int64 parts_found = 1; - repeated ColumnStats col_stats = 2; -} - -message AggrStatsBloomFilter { - message BloomFilter { - required int32 num_bits = 1; - required int32 num_funcs = 2; - repeated int64 bits = 3; - } - required bytes db_name = 1; - required bytes table_name = 2; - required BloomFilter bloom_filter = 3; - required int64 aggregated_at = 4; -} - -message AggrStatsInvalidatorFilter { - message Entry { - required bytes db_name = 1; - required bytes table_name = 2; - required bytes part_name = 3; - } - - repeated Entry to_invalidate = 1; - required int64 run_every = 2; - required int64 max_cache_entry_life = 3; -} - -message ColumnStats { - - message BooleanStats { - optional int64 num_trues = 1; - optional int64 num_falses = 2; - } - - message LongStats { - optional sint64 low_value = 1; - optional sint64 high_value = 2; - } - - message DoubleStats { - optional double low_value = 1; - optional double high_value = 2; - } - - message StringStats { - optional int64 max_col_length = 1; - optional double avg_col_length = 2; - } - - message DecimalStats { - message Decimal { - required bytes unscaled = 1; - required int32 scale = 2; - } - optional Decimal low_value = 1; - optional Decimal high_value = 2; - } - - optional int64 last_analyzed = 1; - required string column_type = 2; - optional int64 num_nulls = 3; - optional int64 num_distinct_values = 4; - optional BooleanStats bool_stats = 5; - optional LongStats long_stats = 6; - optional DoubleStats double_stats = 7; - optional StringStats string_stats = 8; - optional StringStats binary_stats = 9; - optional DecimalStats decimal_stats = 10; - optional string column_name = 11; - optional string bit_vectors = 12; -} - -message Database { - optional string description = 1; - optional string uri = 2; - optional Parameters parameters = 3; - optional PrincipalPrivilegeSet privileges = 4; - optional string owner_name = 5; - optional PrincipalType owner_type = 6; -} - -message DelegationToken { - required string token_str = 1; -} - -message FieldSchema { - required string name = 1; - required string type = 2; - optional string comment = 3; -} - -message Function { - enum FunctionType { - JAVA = 1; - } - - message ResourceUri { - enum ResourceType { - JAR = 1; - FILE = 2; - ARCHIVE = 3; - } - required ResourceType resource_type = 1; - required string uri = 2; - } - - optional string class_name = 1; - optional string owner_name = 2; - optional PrincipalType owner_type = 3; - optional sint64 create_time = 4; - optional FunctionType function_type = 5; - repeated ResourceUri resource_uris = 6; -} - -message MasterKey { - required string master_key = 1; -} - -message ParameterEntry { - required string key = 1; - required string value = 2; -} - -message Parameters { - repeated ParameterEntry parameter = 1; -} - -message Partition { - optional int64 create_time = 1; - optional int64 last_access_time = 2; - optional string location = 3; - optional Parameters sd_parameters = 4; // storage descriptor parameters - required bytes sd_hash = 5; - optional Parameters parameters = 6; // partition parameters - // We don't support partition level privileges -} - -message PrincipalPrivilegeSetEntry { - required string principal_name = 1; - repeated PrivilegeGrantInfo privileges = 2; -} - -message PrincipalPrivilegeSet { - repeated PrincipalPrivilegeSetEntry users = 1; - repeated PrincipalPrivilegeSetEntry roles = 2; -} - 
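For context only, a minimal sketch rather than part of the patch: apart from their min/max/null bookkeeping, all of the *ColumnStatsMerger classes removed above apply the same NDV rule. The snippet below restates that rule for LongColumnStatsData using the same APIs the removed code called; the class and method names here are illustrative only.

import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator;
import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory;
import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;

public class NdvMergeRuleSketch {
  public static void mergeNdv(LongColumnStatsData agg, LongColumnStatsData incoming) {
    // Without usable bit vectors on both sides there is nothing to union,
    // so fall back to the larger of the two point estimates.
    if (!agg.isSetBitVectors() || agg.getBitVectors().length() == 0
        || !incoming.isSetBitVectors() || incoming.getBitVectors().length() == 0) {
      agg.setNumDVs(Math.max(agg.getNumDVs(), incoming.getNumDVs()));
      return;
    }
    NumDistinctValueEstimator oldEst =
        NumDistinctValueEstimatorFactory.getNumDistinctValueEstimator(agg.getBitVectors());
    NumDistinctValueEstimator newEst =
        NumDistinctValueEstimatorFactory.getNumDistinctValueEstimator(incoming.getBitVectors());
    if (oldEst.canMerge(newEst)) {
      // Union the two sketches, re-estimate, and keep the merged sketch on the aggregate.
      oldEst.mergeEstimators(newEst);
      agg.setNumDVs(oldEst.estimateNumDistinctValues());
      agg.setBitVectors(oldEst.serialize());
    } else {
      // Incompatible sketches: best effort again, take the larger estimate.
      agg.setNumDVs(Math.max(agg.getNumDVs(), incoming.getNumDVs()));
    }
  }
}

The per-type mergers differ only in the typed stats object they unpack and in how they combine low/high or length fields; the NDV handling is identical across them.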
-message PrivilegeGrantInfo { - optional string privilege = 1; - optional int64 create_time = 2; - optional string grantor = 3; - optional PrincipalType grantor_type = 4; - optional bool grant_option = 5; -} - -message RoleGrantInfo { - required string principal_name = 1; - required PrincipalType principal_type = 2; - optional int64 add_time = 3; - optional string grantor = 4; - optional PrincipalType grantor_type = 5; - optional bool grant_option = 6; -} - -message RoleGrantInfoList { - repeated RoleGrantInfo grant_info = 1; -} - -message RoleList { - repeated string role = 1; -} - -message Role { - optional int64 create_time = 1; - optional string owner_name = 2; -} - -message StorageDescriptor { - message Order { - required string column_name = 1; - optional sint32 order = 2 [default = 1]; - } - - message SerDeInfo { - optional string name = 1; - optional string serialization_lib = 2; - optional Parameters parameters = 3; - } - - message SkewedInfo { - message SkewedColValueList { - repeated string skewed_col_value = 1; - } - - message SkewedColValueLocationMap { - repeated string key = 1; - required string value = 2; - } - - repeated string skewed_col_names = 1; - repeated SkewedColValueList skewed_col_values = 2; - repeated SkewedColValueLocationMap skewed_col_value_location_maps = 3; - } - - repeated FieldSchema cols = 1; - optional string input_format = 2; - optional string output_format = 3; - optional bool is_compressed = 4; - optional sint32 num_buckets = 5; - optional SerDeInfo serde_info = 6; - repeated string bucket_cols = 7; - repeated Order sort_cols = 8; - optional SkewedInfo skewed_info = 9; - optional bool stored_as_sub_directories = 10; -} - -message Table { - optional string owner = 1; - optional int64 create_time = 2; - optional int64 last_access_time = 3; - optional int64 retention = 4; - optional string location = 5; - optional Parameters sd_parameters = 6; // storage descriptor parameters - required bytes sd_hash = 7; - repeated FieldSchema partition_keys = 8; - optional Parameters parameters = 9; - optional string view_original_text = 10; - optional string view_expanded_text = 11; - optional string table_type = 12; - optional PrincipalPrivilegeSet privileges = 13; - optional bool is_temporary = 14; - optional bool is_rewrite_enabled = 15; -} - -message Index { - optional string indexHandlerClass = 1; // reserved - required string dbName = 2; - required string origTableName = 3; - optional string location = 4; - optional Parameters sd_parameters = 5; // storage descriptor parameters - optional int32 createTime = 6; - optional int32 lastAccessTime = 7; - optional string indexTableName = 8; - optional bytes sd_hash = 9; - optional Parameters parameters = 10; - optional bool deferredRebuild = 11; -} - -message PartitionKeyComparator { - required string names = 1; - required string types = 2; - message Mark { - required string value = 1; - required bool inclusive = 2; - } - message Range { - required string key = 1; - optional Mark start = 2; - optional Mark end = 3; - } - message Operator { - enum Type { - LIKE = 0; - NOTEQUALS = 1; - } - required Type type = 1; - required string key = 2; - required string val = 3; - } - repeated Operator op = 3; - repeated Range range = 4; -} - -message PrimaryKey { - message PrimaryKeyColumn { - required string column_name = 1; - required sint32 key_seq = 2; - } - - required string pk_name = 1; - repeated PrimaryKeyColumn cols = 2; - optional bool enable_constraint = 3; - optional bool validate_constraint = 4; - optional bool 
rely_constraint = 5; -} - -message ForeignKeys { - message ForeignKey { - message ForeignKeyColumn { - required string column_name = 1; - required string referenced_column_name = 2; - required sint32 key_seq = 3; - } - - required string fk_name = 1; - required string referenced_db_name = 2; - required string referenced_table_name = 3; - optional string referenced_pk_name = 4; - optional int32 update_rule = 5; - optional int32 delete_rule = 6; - repeated ForeignKeyColumn cols = 7; - optional bool enable_constraint = 8; - optional bool validate_constraint = 9; - optional bool rely_constraint = 10; - } - - repeated ForeignKey fks = 1; -} - -message UniqueConstraints { - message UniqueConstraint { - message UniqueConstraintColumn { - required string column_name = 1; - required sint32 key_seq = 2; - } - - required string uk_name = 1; - repeated UniqueConstraintColumn cols = 2; - optional bool enable_constraint = 3; - optional bool validate_constraint = 4; - optional bool rely_constraint = 5; - } - - repeated UniqueConstraint uks = 1; -} - -message NotNullConstraints { - message NotNullConstraint { - message NotNullConstraintColumn { - required string column_name = 1; - } - - required string nn_name = 1; - repeated NotNullConstraintColumn cols = 2; - optional bool enable_constraint = 3; - optional bool validate_constraint = 4; - optional bool rely_constraint = 5; - } - - repeated NotNullConstraint nns = 1; -} - diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java deleted file mode 100644 index 784648a..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java +++ /dev/null @@ -1,219 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.FileFormatProxy; -import org.apache.hadoop.hive.metastore.PartitionExpressionProxy; -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.SortedMap; - -/** - * Mock utilities for HBaseStore testing - */ -public class MockUtils { - - /** - * The default impl is in ql package and is not available in unit tests. - */ - public static class NOOPProxy implements PartitionExpressionProxy { - - @Override - public String convertExprToFilter(byte[] expr) throws MetaException { - return null; - } - - @Override - public boolean filterPartitionsByExpr(List partColumnNames, - List partColumnTypeInfos, byte[] expr, String defaultPartitionName, - List partitionNames) throws MetaException { - return false; - } - - @Override - public SearchArgument createSarg(byte[] expr) { - return null; - } - - @Override - public FileMetadataExprType getMetadataType(String inputFormat) { - return null; - } - - @Override - public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) { - return null; - } - } - - static HBaseStore init(Configuration conf, HTableInterface htable, - final SortedMap rows) throws IOException { - ((HiveConf)conf).setVar(ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, NOOPProxy.class.getName()); - Mockito.when(htable.get(Mockito.any(Get.class))).thenAnswer(new Answer() { - @Override - public Result answer(InvocationOnMock invocation) throws Throwable { - Get get = (Get) invocation.getArguments()[0]; - Cell cell = rows.get(new String(get.getRow())); - if (cell == null) { - return new Result(); - } else { - return Result.create(new Cell[]{cell}); - } - } - }); - - Mockito.when(htable.get(Mockito.anyListOf(Get.class))).thenAnswer(new Answer() { - @Override - public Result[] answer(InvocationOnMock invocation) throws Throwable { - @SuppressWarnings("unchecked") - List gets = (List) invocation.getArguments()[0]; - Result[] results = new Result[gets.size()]; - for (int i = 0; i < gets.size(); i++) { - Cell cell = rows.get(new String(gets.get(i).getRow())); - Result result; - if (cell == null) { - result = new Result(); - } else { - result = Result.create(new Cell[]{cell}); - } - results[i] = result; - } - return results; - } - }); - - Mockito.when(htable.getScanner(Mockito.any(Scan.class))).thenAnswer(new Answer() { - @Override - public ResultScanner answer(InvocationOnMock invocation) throws Throwable { - Scan scan = 
(Scan)invocation.getArguments()[0]; - List results = new ArrayList(); - String start = new String(scan.getStartRow()); - String stop = new String(scan.getStopRow()); - SortedMap sub = rows.subMap(start, stop); - for (Map.Entry e : sub.entrySet()) { - results.add(Result.create(new Cell[]{e.getValue()})); - } - - final Iterator iter = results.iterator(); - - return new ResultScanner() { - @Override - public Result next() throws IOException { - return null; - } - - @Override - public Result[] next(int nbRows) throws IOException { - return new Result[0]; - } - - @Override - public void close() { - - } - - @Override - public Iterator iterator() { - return iter; - } - }; - } - }); - - Mockito.doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocation) throws Throwable { - Put put = (Put)invocation.getArguments()[0]; - rows.put(new String(put.getRow()), put.getFamilyCellMap().firstEntry().getValue().get(0)); - return null; - } - }).when(htable).put(Mockito.any(Put.class)); - - Mockito.when(htable.checkAndPut(Mockito.any(byte[].class), Mockito.any(byte[].class), - Mockito.any(byte[].class), Mockito.any(byte[].class), Mockito.any(Put.class))).thenAnswer( - new Answer() { - - @Override - public Boolean answer(InvocationOnMock invocation) throws Throwable { - // Always say it succeeded and overwrite - Put put = (Put)invocation.getArguments()[4]; - rows.put(new String(put.getRow()), - put.getFamilyCellMap().firstEntry().getValue().get(0)); - return true; - } - }); - - Mockito.doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocation) throws Throwable { - Delete del = (Delete)invocation.getArguments()[0]; - rows.remove(new String(del.getRow())); - return null; - } - }).when(htable).delete(Mockito.any(Delete.class)); - - Mockito.when(htable.checkAndDelete(Mockito.any(byte[].class), Mockito.any(byte[].class), - Mockito.any(byte[].class), Mockito.any(byte[].class), Mockito.any(Delete.class))).thenAnswer( - new Answer() { - - @Override - public Boolean answer(InvocationOnMock invocation) throws Throwable { - // Always say it succeeded - Delete del = (Delete)invocation.getArguments()[4]; - rows.remove(new String(del.getRow())); - return true; - } - }); - - // Mock connection - HBaseConnection hconn = Mockito.mock(HBaseConnection.class); - Mockito.when(hconn.getHBaseTable(Mockito.anyString())).thenReturn(htable); - HiveConf.setVar(conf, HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN); - HBaseReadWrite.setTestConnection(hconn); - HBaseReadWrite.setConf(conf); - HBaseStore store = new HBaseStore(); - store.setConf(conf); - return store; - } -} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java deleted file mode 100644 index c6a134c..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java +++ /dev/null @@ -1,316 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.SortedMap; -import java.util.TreeMap; - -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TestHBaseAggregateStatsCache { - private static final Logger LOG = LoggerFactory.getLogger(TestHBaseAggregateStatsCache.class.getName()); - - @Mock HTableInterface htable; - private HBaseStore store; - SortedMap rows = new TreeMap<>(); - - @Before - public void before() throws IOException { - MockitoAnnotations.initMocks(this); - HiveConf conf = new HiveConf(); - conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); - store = MockUtils.init(conf, htable, rows); - store.backdoor().getStatsCache().resetCounters(); - } - - private static interface Checker { - void checkStats(AggrStats aggrStats) throws Exception; - } - - // Do to limitations in the Mock infrastructure we use for HBase testing we can only test - // this for a single column table and we can't really test hits in hbase, only in memory or - // build from scratch. But it's still useful to cover many bugs. More in depth testing with - // multiple columns and with HBase hits is done in TestHBaseAggrStatsCacheIntegration. 
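As a reference sketch only, not something this patch adds: the core of how the removed MockUtils backs the mocked HTableInterface with an in-memory sorted map, so gets and puts round-trip without an HBase cluster. The helper name and the reduced scope (only get and put; no scans, deletes, or check-and-put) are illustrative.

import java.io.IOException;
import java.util.SortedMap;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class InMemoryHTableSketch {

  // Wires get/put on a mocked HTableInterface to a plain sorted map keyed by row.
  public static void wire(final HTableInterface htable,
      final SortedMap<String, Cell> rows) throws IOException {
    Mockito.when(htable.get(Mockito.any(Get.class))).thenAnswer(new Answer<Result>() {
      @Override
      public Result answer(InvocationOnMock invocation) throws Throwable {
        Get get = (Get) invocation.getArguments()[0];
        Cell cell = rows.get(new String(get.getRow()));
        // Missing rows come back as an empty Result, as in the removed MockUtils.
        return cell == null ? new Result() : Result.create(new Cell[] { cell });
      }
    });

    Mockito.doAnswer(new Answer<Void>() {
      @Override
      public Void answer(InvocationOnMock invocation) throws Throwable {
        Put put = (Put) invocation.getArguments()[0];
        // Keep only the first cell of the mutation, which is all these tests need.
        rows.put(new String(put.getRow()),
            put.getFamilyCellMap().firstEntry().getValue().get(0));
        return null;
      }
    }).when(htable).put(Mockito.any(Put.class));
  }
}

A test would create the map with new TreeMap<>(), pass a @Mock HTableInterface through a helper like this, and then drive HBaseStore against it, which is what the before() methods below do via MockUtils.init.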
- - @Test - public void allWithStats() throws Exception { - String dbName = "default"; - String tableName = "hit"; - List partVals1 = Arrays.asList("today"); - List partVals2 = Arrays.asList("yesterday"); - long now = System.currentTimeMillis(); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "boolean", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections.emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); - store.createTable(table); - - for (List partVals : Arrays.asList(partVals1, partVals2)) { - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVals.get(0)); - Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, - Collections.emptyMap()); - store.addPartition(part); - - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVals.get(0)); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1"); - obj.setColType("boolean"); - ColumnStatisticsData data = new ColumnStatisticsData(); - BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); - bcsd.setNumFalses(10); - bcsd.setNumTrues(20); - bcsd.setNumNulls(30); - data.setBooleanStats(bcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - - store.updatePartitionColumnStatistics(cs, partVals); - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(2, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col1", cso.getColName()); - Assert.assertEquals("boolean", cso.getColType()); - BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); - Assert.assertEquals(20, bcsd.getNumFalses()); - Assert.assertEquals(40, bcsd.getNumTrues()); - Assert.assertEquals(60, bcsd.getNumNulls()); - } - }; - - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - // Check that we had to build it from the stats - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); - - // Call again, this time it should come from memory. Also, reverse the name order this time - // to assure that we still hit. 
- aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); - } - - - @Test - public void noneWithStats() throws Exception { - String dbName = "default"; - String tableName = "nws"; - List partVals1 = Arrays.asList("today"); - List partVals2 = Arrays.asList("yesterday"); - long now = System.currentTimeMillis(); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "boolean", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections.emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); - store.createTable(table); - - for (List partVals : Arrays.asList(partVals1, partVals2)) { - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/nws/ds=" + partVals.get(0)); - Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, - Collections.emptyMap()); - store.addPartition(part); - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(0, aggrStats.getPartsFound()); - } - }; - - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - } - - @Test - public void someNonexistentPartitions() throws Exception { - String dbName = "default"; - String tableName = "snp"; - List partVals1 = Arrays.asList("today"); - List partVals2 = Arrays.asList("yesterday"); - long now = System.currentTimeMillis(); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "boolean", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections.emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); - store.createTable(table); - - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVals1.get(0)); - Partition part = new Partition(partVals1, dbName, tableName, (int) now, (int) now, psd, - Collections.emptyMap()); - store.addPartition(part); - - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVals1.get(0)); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1"); - obj.setColType("double"); - ColumnStatisticsData data = new ColumnStatisticsData(); - DoubleColumnStatsData dcsd = new DoubleColumnStatsData(); - dcsd.setHighValue(1000.2342343); - dcsd.setLowValue(-20.1234213423); - dcsd.setNumNulls(30); - dcsd.setNumDVs(12342); - 
data.setDoubleStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - - store.updatePartitionColumnStatistics(cs, partVals1); - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(1, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col1", cso.getColName()); - Assert.assertEquals("double", cso.getColType()); - DoubleColumnStatsData dcsd = cso.getStatsData().getDoubleStats(); - Assert.assertEquals(1000.23, dcsd.getHighValue(), 0.01); - Assert.assertEquals(-20.12, dcsd.getLowValue(), 0.01); - Assert.assertEquals(30, dcsd.getNumNulls()); - Assert.assertEquals(12342, dcsd.getNumDVs()); - } - }; - - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - // Check that we had to build it from the stats - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); - - // Call again, this time it should come from memory. Also, reverse the name order this time - // to assure that we still hit. - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); - } - - @Test - public void nonexistentPartitions() throws Exception { - String dbName = "default"; - String tableName = "nep"; - List partVals1 = Arrays.asList("today"); - List partVals2 = Arrays.asList("yesterday"); - long now = System.currentTimeMillis(); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "boolean", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections.emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); - store.createTable(table); - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(0, aggrStats.getPartsFound()); - } - }; - - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - // Check that we had to build it from the stats - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); - } - // TODO test invalidation -} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java deleted file mode 100644 index 
9cf1fb8..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java +++ /dev/null @@ -1,193 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.SortedMap; -import java.util.TreeMap; - -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TestHBaseAggregateStatsCacheWithBitVector { - private static final Logger LOG = LoggerFactory - .getLogger(TestHBaseAggregateStatsCacheWithBitVector.class.getName()); - - @Mock - HTableInterface htable; - private HBaseStore store; - SortedMap rows = new TreeMap<>(); - - @Before - public void before() throws IOException { - MockitoAnnotations.initMocks(this); - HiveConf conf = new HiveConf(); - conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); - store = MockUtils.init(conf, htable, rows); - store.backdoor().getStatsCache().resetCounters(); - } - - private static interface Checker { - void checkStats(AggrStats aggrStats) throws Exception; - } - - @Test - public void allPartitions() throws Exception { - String dbName = "default"; - String tableName = "snp"; - List partVals1 = Arrays.asList("today"); - List partVals2 = Arrays.asList("yesterday"); - long now = System.currentTimeMillis(); - - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "boolean", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. 
emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVals1.get(0)); - Partition part = new Partition(partVals1, dbName, tableName, (int) now, (int) now, psd, - Collections. emptyMap()); - store.addPartition(part); - - psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVals2.get(0)); - part = new Partition(partVals2, dbName, tableName, (int) now, (int) now, psd, - Collections. emptyMap()); - store.addPartition(part); - - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVals1.get(0)); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1"); - obj.setColType("double"); - ColumnStatisticsData data = new ColumnStatisticsData(); - DoubleColumnStatsData dcsd = new DoubleColumnStatsData(); - dcsd.setHighValue(1000.2342343); - dcsd.setLowValue(-20.1234213423); - dcsd.setNumNulls(30); - dcsd.setNumDVs(12342); - HyperLogLog hll = HyperLogLog.builder().build(); - hll.addDouble(1); - hll.addDouble(2); - hll.addDouble(3); - dcsd.setBitVectors(hll.serialize()); - data.setDoubleStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVals1); - - cs = new ColumnStatistics(); - desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVals2.get(0)); - cs.setStatsDesc(desc); - obj = new ColumnStatisticsObj(); - obj.setColName("col1"); - obj.setColType("double"); - data = new ColumnStatisticsData(); - dcsd = new DoubleColumnStatsData(); - dcsd.setHighValue(1000.2342343); - dcsd.setLowValue(-20.1234213423); - dcsd.setNumNulls(30); - dcsd.setNumDVs(12342); - hll = HyperLogLog.builder().build(); - hll.addDouble(3); - hll.addDouble(4); - hll.addDouble(5); - dcsd.setBitVectors(hll.serialize()); - data.setDoubleStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - - store.updatePartitionColumnStatistics(cs, partVals2); - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(2, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col1", cso.getColName()); - Assert.assertEquals("double", cso.getColType()); - DoubleColumnStatsData dcsd = cso.getStatsData().getDoubleStats(); - Assert.assertEquals(1000.23, dcsd.getHighValue(), 0.01); - Assert.assertEquals(-20.12, dcsd.getLowValue(), 0.01); - Assert.assertEquals(60, dcsd.getNumNulls()); - Assert.assertEquals(5, dcsd.getNumDVs()); - // we do not store the bitvector for the aggrStats. - // we can store that if it is necessary in the future. 
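A small illustrative sketch, not part of the patch, of the arithmetic the checker above relies on: the two partitions carry HyperLogLog sketches over {1, 2, 3} and {3, 4, 5}, and merging the estimators unions them, so the aggregated NDV comes out as 5. The APIs are used exactly as in the removed test and merge code; the class name is made up.

import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator;
import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory;
import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;

public class HllUnionSketch {
  public static void main(String[] args) {
    HyperLogLog first = HyperLogLog.builder().build();
    first.addDouble(1);
    first.addDouble(2);
    first.addDouble(3);

    HyperLogLog second = HyperLogLog.builder().build();
    second.addDouble(3);
    second.addDouble(4);
    second.addDouble(5);

    // Round-trip through the serialized form, the way the merge path does.
    NumDistinctValueEstimator merged =
        NumDistinctValueEstimatorFactory.getNumDistinctValueEstimator(first.serialize());
    NumDistinctValueEstimator other =
        NumDistinctValueEstimatorFactory.getNumDistinctValueEstimator(second.serialize());
    merged.mergeEstimators(other);

    // {1,2,3} union {3,4,5} has 5 distinct values, hence the expected NumDVs of 5 above.
    System.out.println(merged.estimateNumDistinctValues());
  }
}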
- } - }; - - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - // Check that we had to build it from the stats - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); - - // Call again, this time it should come from memory. Also, reverse the name - // order this time - // to assure that we still hit. - aggrStats = store.get_aggr_stats_for(dbName, tableName, - Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - - Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); - Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); - Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); - } - -} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java deleted file mode 100644 index 4d868b0..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java +++ /dev/null @@ -1,721 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.SortedMap; -import java.util.TreeMap; - -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.StatObjectConverter; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Table; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TestHBaseAggregateStatsExtrapolation { - private static final Logger LOG = LoggerFactory - .getLogger(TestHBaseAggregateStatsExtrapolation.class.getName()); - - @Mock - HTableInterface htable; - private HBaseStore store; - SortedMap rows = new TreeMap<>(); - - // NDV will be 3 for the bitVectors - String bitVectors = null; - @Before - public void before() throws IOException { - MockitoAnnotations.initMocks(this); - HiveConf conf = new HiveConf(); - conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); - store = MockUtils.init(conf, htable, rows); - store.backdoor().getStatsCache().resetCounters(); - HyperLogLog hll = HyperLogLog.builder().build(); - hll.addLong(1); - hll.addLong(2); - hll.addLong(3); - bitVectors = hll.serialize(); - } - - private static interface Checker { - void checkStats(AggrStats aggrStats) throws Exception; - } - - @Test - public void allPartitionsHaveBitVectorStatusLong() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "long", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1"); - obj.setColType("long"); - ColumnStatisticsData data = new ColumnStatisticsData(); - LongColumnStatsData dcsd = new LongColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i + 1); - dcsd.setBitVectors(bitVectors); - data.setLongStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(10, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col1", cso.getColName()); - Assert.assertEquals("long", cso.getColType()); - LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); - Assert.assertEquals(1009, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1009, lcsd.getLowValue(), 0.01); - Assert.assertEquals(45, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - } - - @Test - public void allPartitionsHaveBitVectorStatusDecimal() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1_decimal", "decimal", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1_decimal"); - obj.setColType("decimal"); - ColumnStatisticsData data = new ColumnStatisticsData(); - DecimalColumnStatsData dcsd = new DecimalColumnStatsData(); - dcsd.setHighValue(StatObjectConverter.createThriftDecimal("" + (1000 + i))); - dcsd.setLowValue(StatObjectConverter.createThriftDecimal("" + (-1000 - i))); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i + 1); - dcsd.setBitVectors(bitVectors); - data.setDecimalStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(10, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col1_decimal", cso.getColName()); - Assert.assertEquals("decimal", cso.getColType()); - DecimalColumnStatsData lcsd = cso.getStatsData().getDecimalStats(); - Assert.assertEquals(1009, HBaseUtils.getDoubleValue(lcsd.getHighValue()), 0.01); - Assert.assertEquals(-1009, HBaseUtils.getDoubleValue(lcsd.getLowValue()), 0.01); - Assert.assertEquals(45, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col1_decimal")); - statChecker.checkStats(aggrStats); - } - - @Test - public void allPartitionsHaveBitVectorStatusDouble() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1_double", "double", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1_double"); - obj.setColType("double"); - ColumnStatisticsData data = new ColumnStatisticsData(); - DoubleColumnStatsData dcsd = new DoubleColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i + 1); - dcsd.setBitVectors(bitVectors); - data.setDoubleStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(10, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col1_double", cso.getColName()); - Assert.assertEquals("double", cso.getColType()); - DoubleColumnStatsData lcsd = cso.getStatsData().getDoubleStats(); - Assert.assertEquals(1009, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1009, lcsd.getLowValue(), 0.01); - Assert.assertEquals(45, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col1_double")); - statChecker.checkStats(aggrStats); - } - - @Test - public void allPartitionsHaveBitVectorStatusString() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1_string", "string", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1_string"); - obj.setColType("string"); - ColumnStatisticsData data = new ColumnStatisticsData(); - StringColumnStatsData dcsd = new StringColumnStatsData(); - dcsd.setAvgColLen(i + 1); - dcsd.setMaxColLen(i + 10); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i + 1); - dcsd.setBitVectors(bitVectors); - data.setStringStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(10, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col1_string", cso.getColName()); - Assert.assertEquals("string", cso.getColType()); - StringColumnStatsData lcsd = cso.getStatsData().getStringStats(); - Assert.assertEquals(10, lcsd.getAvgColLen(), 0.01); - Assert.assertEquals(19, lcsd.getMaxColLen(), 0.01); - Assert.assertEquals(45, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col1_string")); - statChecker.checkStats(aggrStats); - } - - @Test - public void noPartitionsHaveBitVectorStatus() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col2", "long", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. emptyMap()); - store.addPartition(part); - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col2"); - obj.setColType("long"); - ColumnStatisticsData data = new ColumnStatisticsData(); - LongColumnStatsData dcsd = new LongColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(i == 0 ? 
1 : 10 * i); - data.setLongStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(10, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col2", cso.getColName()); - Assert.assertEquals("long", cso.getColType()); - LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); - Assert.assertEquals(1009, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1009, lcsd.getLowValue(), 0.01); - Assert.assertEquals(45, lcsd.getNumNulls()); - Assert.assertEquals(90, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col2")); - statChecker.checkStats(aggrStats); - } - - @Test - public void TwoEndsOfPartitionsHaveBitVectorStatus() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col3", "long", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - if (i < 2 || i > 7) { - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col3"); - obj.setColType("long"); - ColumnStatisticsData data = new ColumnStatisticsData(); - LongColumnStatsData dcsd = new LongColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i); - dcsd.setBitVectors(bitVectors); - data.setLongStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(4, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col3", cso.getColName()); - Assert.assertEquals("long", cso.getColType()); - LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); - Assert.assertEquals(1010, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1010, lcsd.getLowValue(), 0.01); - Assert.assertEquals(45, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col3")); - statChecker.checkStats(aggrStats); - } - - @Test - public void MiddleOfPartitionsHaveBitVectorStatus() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col4", "long", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - if (i > 2 && i < 7) { - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col4"); - obj.setColType("long"); - ColumnStatisticsData data = new ColumnStatisticsData(); - LongColumnStatsData dcsd = new LongColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i); - dcsd.setBitVectors(bitVectors); - data.setLongStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(4, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col4", cso.getColName()); - Assert.assertEquals("long", cso.getColType()); - LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); - Assert.assertEquals(1006, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1006, lcsd.getLowValue(), 0.01); - Assert.assertEquals(45, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col4")); - statChecker.checkStats(aggrStats); - } - - @Test - public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusLong() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col5", "long", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - if (i == 0 || i == 2 || i == 3 || i == 5 || i == 6 || i == 8) { - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col5"); - obj.setColType("long"); - ColumnStatisticsData data = new ColumnStatisticsData(); - LongColumnStatsData dcsd = new LongColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i); - dcsd.setBitVectors(bitVectors); - data.setLongStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(6, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col5", cso.getColName()); - Assert.assertEquals("long", cso.getColType()); - LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); - Assert.assertEquals(1010, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1010, lcsd.getLowValue(), 0.01); - Assert.assertEquals(40, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col5")); - statChecker.checkStats(aggrStats); - } - - @Test - public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusDouble() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col5_double", "double", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - if (i == 0 || i == 2 || i == 3 || i == 5 || i == 6 || i == 8) { - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col5_double"); - obj.setColType("double"); - ColumnStatisticsData data = new ColumnStatisticsData(); - DoubleColumnStatsData dcsd = new DoubleColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i); - dcsd.setBitVectors(bitVectors); - data.setDoubleStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(6, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col5_double", cso.getColName()); - Assert.assertEquals("double", cso.getColType()); - DoubleColumnStatsData lcsd = cso.getStatsData().getDoubleStats(); - Assert.assertEquals(1010, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1010, lcsd.getLowValue(), 0.01); - Assert.assertEquals(40, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col5_double")); - statChecker.checkStats(aggrStats); - } -} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java deleted file mode 100644 index 0ad2780..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java +++ /dev/null @@ -1,589 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.SortedMap; -import java.util.TreeMap; - -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.StatObjectConverter; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TestHBaseAggregateStatsNDVUniformDist { - private static final Logger LOG = LoggerFactory - .getLogger(TestHBaseAggregateStatsNDVUniformDist.class.getName()); - - @Mock - HTableInterface htable; - private HBaseStore store; - SortedMap rows = new TreeMap<>(); - - // NDV will be 3 for bitVectors[0] and 1 for bitVectors[1] - String bitVectors[] = new String[2]; - - - @Before - public void before() throws IOException { - MockitoAnnotations.initMocks(this); - HiveConf conf = new HiveConf(); - conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); - conf.setBoolean(HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION.varname, true); - store = MockUtils.init(conf, htable, rows); - store.backdoor().getStatsCache().resetCounters(); - HyperLogLog hll = HyperLogLog.builder().build(); - hll.addLong(1); - bitVectors[1] = hll.serialize(); - hll = HyperLogLog.builder().build(); - hll.addLong(2); - hll.addLong(3); - hll.addLong(3); - hll.addLong(4); - bitVectors[0] = hll.serialize(); - } - - private static interface Checker { - void checkStats(AggrStats aggrStats) throws Exception; - } - - @Test - public void allPartitionsHaveBitVectorStatus() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col1", "long", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. 
emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. emptyMap()); - store.addPartition(part); - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col1"); - obj.setColType("long"); - ColumnStatisticsData data = new ColumnStatisticsData(); - LongColumnStatsData dcsd = new LongColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i + 1); - dcsd.setBitVectors(bitVectors[0]); - data.setLongStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(10, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col1", cso.getColName()); - Assert.assertEquals("long", cso.getColType()); - LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); - Assert.assertEquals(1009, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1009, lcsd.getLowValue(), 0.01); - Assert.assertEquals(45, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col1")); - statChecker.checkStats(aggrStats); - } - - @Test - public void noPartitionsHaveBitVectorStatus() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col2", "long", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col2"); - obj.setColType("long"); - ColumnStatisticsData data = new ColumnStatisticsData(); - LongColumnStatsData dcsd = new LongColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i + 1); - data.setLongStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(10, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col2", cso.getColName()); - Assert.assertEquals("long", cso.getColType()); - LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); - Assert.assertEquals(1009, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1009, lcsd.getLowValue(), 0.01); - Assert.assertEquals(45, lcsd.getNumNulls()); - Assert.assertEquals(91, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col2")); - statChecker.checkStats(aggrStats); - } - - @Test - public void TwoEndsOfPartitionsHaveBitVectorStatus() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col3", "long", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - if (i < 2 || i > 7) { - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col3"); - obj.setColType("long"); - ColumnStatisticsData data = new ColumnStatisticsData(); - LongColumnStatsData dcsd = new LongColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i + 1); - dcsd.setBitVectors(bitVectors[i / 5]); - data.setLongStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(4, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col3", cso.getColName()); - Assert.assertEquals("long", cso.getColType()); - LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); - Assert.assertEquals(1010, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1010, lcsd.getLowValue(), 0.01); - Assert.assertEquals(45, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col3")); - statChecker.checkStats(aggrStats); - } - - @Test - public void MiddleOfPartitionsHaveBitVectorStatus() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col4", "long", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - if (i > 2 && i < 7) { - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col4"); - obj.setColType("long"); - ColumnStatisticsData data = new ColumnStatisticsData(); - LongColumnStatsData dcsd = new LongColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i + 1); - dcsd.setBitVectors(bitVectors[0]); - data.setLongStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(4, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col4", cso.getColName()); - Assert.assertEquals("long", cso.getColType()); - LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); - Assert.assertEquals(1006, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1006, lcsd.getLowValue(), 0.01); - Assert.assertEquals(45, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col4")); - statChecker.checkStats(aggrStats); - } - - @Test - public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusLong() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col5_long", "long", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - if (i == 0 || i == 2 || i == 3 || i == 5 || i == 6 || i == 8) { - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col5_long"); - obj.setColType("long"); - ColumnStatisticsData data = new ColumnStatisticsData(); - LongColumnStatsData dcsd = new LongColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i + 1); - dcsd.setBitVectors(bitVectors[i / 5]); - data.setLongStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(6, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col5_long", cso.getColName()); - Assert.assertEquals("long", cso.getColType()); - LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); - Assert.assertEquals(1010, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1010, lcsd.getLowValue(), 0.01); - Assert.assertEquals(40, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col5_long")); - statChecker.checkStats(aggrStats); - } - - @Test - public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusDecimal() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col5_decimal", "decimal", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - if (i == 0 || i == 2 || i == 3 || i == 5 || i == 6 || i == 8) { - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col5_decimal"); - obj.setColType("decimal"); - ColumnStatisticsData data = new ColumnStatisticsData(); - DecimalColumnStatsData dcsd = new DecimalColumnStatsData(); - dcsd.setHighValue(StatObjectConverter.createThriftDecimal("" + (1000 + i))); - dcsd.setLowValue(StatObjectConverter.createThriftDecimal("" + (-1000 - i))); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i + 1); - dcsd.setBitVectors(bitVectors[i / 5]); - data.setDecimalStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(6, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col5_decimal", cso.getColName()); - Assert.assertEquals("decimal", cso.getColType()); - DecimalColumnStatsData lcsd = cso.getStatsData().getDecimalStats(); - Assert.assertEquals(1010, HBaseUtils.getDoubleValue(lcsd.getHighValue()), 0.01); - Assert.assertEquals(-1010, HBaseUtils.getDoubleValue(lcsd.getLowValue()), 0.01); - Assert.assertEquals(40, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col5_decimal")); - statChecker.checkStats(aggrStats); - } - - @Test - public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusDouble() throws Exception { - String dbName = "default"; - String tableName = "snp"; - long now = System.currentTimeMillis(); - List cols = new ArrayList<>(); - cols.add(new FieldSchema("col5_double", "double", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); - List partCols = new ArrayList<>(); - partCols.add(new FieldSchema("ds", "string", "")); - Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); - store.createTable(table); - - List> partVals = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - List partVal = Arrays.asList("" + i); - partVals.add(partVal); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/default/hit/ds=" + partVal); - Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); - store.addPartition(part); - if (i == 0 || i == 2 || i == 3 || i == 5 || i == 6 || i == 8) { - ColumnStatistics cs = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); - desc.setLastAnalyzed(now); - desc.setPartName("ds=" + partVal); - cs.setStatsDesc(desc); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName("col5_double"); - obj.setColType("double"); - ColumnStatisticsData data = new ColumnStatisticsData(); - DoubleColumnStatsData dcsd = new DoubleColumnStatsData(); - dcsd.setHighValue(1000 + i); - dcsd.setLowValue(-1000 - i); - dcsd.setNumNulls(i); - dcsd.setNumDVs(10 * i + 1); - dcsd.setBitVectors(bitVectors[i / 5]); - data.setDoubleStats(dcsd); - obj.setStatsData(data); - cs.addToStatsObj(obj); - store.updatePartitionColumnStatistics(cs, partVal); - } - } - - Checker statChecker = new Checker() { - @Override - public void checkStats(AggrStats aggrStats) throws Exception { - Assert.assertEquals(6, aggrStats.getPartsFound()); - Assert.assertEquals(1, aggrStats.getColStatsSize()); - ColumnStatisticsObj cso = aggrStats.getColStats().get(0); - Assert.assertEquals("col5_double", cso.getColName()); - Assert.assertEquals("double", cso.getColType()); - DoubleColumnStatsData lcsd = cso.getStatsData().getDoubleStats(); - Assert.assertEquals(1010, lcsd.getHighValue(), 0.01); - Assert.assertEquals(-1010, lcsd.getLowValue(), 0.01); - Assert.assertEquals(40, lcsd.getNumNulls()); - Assert.assertEquals(3, lcsd.getNumDVs()); - } - }; - List partNames = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - partNames.add("ds=" + i); - } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, - Arrays.asList("col5_double")); - statChecker.checkStats(aggrStats); - } -} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java deleted file mode 100644 index 06884b3..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java +++ /dev/null @@ -1,483 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import org.apache.hadoop.hbase.filter.RowFilter; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hive.metastore.PartFilterExprUtil; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.FilterPlan; -import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.MultiScanPlan; -import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult; -import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan; -import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan.ScanMarker; -import org.apache.hadoop.hive.metastore.parser.ExpressionTree; -import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode; -import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LogicalOperator; -import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator; -import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode; -import org.junit.Assert; -import org.junit.Test; - -import com.google.common.primitives.Shorts; - -public class TestHBaseFilterPlanUtil { - final boolean INCLUSIVE = true; - - /** - * Test the function that compares byte arrays - */ - @Test - public void testCompare() { - - Assert.assertEquals(-1, HBaseFilterPlanUtil.compare(new byte[] { 1, 2 }, new byte[] { 1, 3 })); - Assert.assertEquals(-1, - HBaseFilterPlanUtil.compare(new byte[] { 1, 2, 3 }, new byte[] { 1, 3 })); - Assert.assertEquals(-1, - HBaseFilterPlanUtil.compare(new byte[] { 1, 2 }, new byte[] { 1, 2, 3 })); - - Assert.assertEquals(0, HBaseFilterPlanUtil.compare(new byte[] { 3, 2 }, new byte[] { 3, 2 })); - - Assert - .assertEquals(1, HBaseFilterPlanUtil.compare(new byte[] { 3, 2, 1 }, new byte[] { 3, 2 })); - Assert - .assertEquals(1, HBaseFilterPlanUtil.compare(new byte[] { 3, 3, 1 }, new byte[] { 3, 2 })); - - } - - /** - * Test function that finds greater/lesser marker - */ - @Test - public void testgetComparedMarker() { - ScanMarker l; - ScanMarker r; - - // equal plans - l = new ScanMarker("1", INCLUSIVE, "int"); - r = new ScanMarker("1", INCLUSIVE, "int"); - assertFirstGreater(l, r); - - l = new ScanMarker("1", !INCLUSIVE, "int"); - r = new ScanMarker("1", !INCLUSIVE, "int"); - assertFirstGreater(l, r); - - assertFirstGreater(null, null); - - // create l is greater because of inclusive flag - l = new ScanMarker("1", !INCLUSIVE, "int"); - // the rule for null vs non-null is different - // non-null is both smaller and greater than null - Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, true)); - Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, true)); - Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, false)); - Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, false)); - - // create l that is greater because of the bytes - l = new ScanMarker("2", INCLUSIVE, "int"); - r = new ScanMarker("1", INCLUSIVE, "int"); - assertFirstGreater(l, r); - - } - - private void assertFirstGreater(ScanMarker big, ScanMarker small) { - Assert.assertEquals(big, ScanPlan.getComparedMarker(big, small, true)); - Assert.assertEquals(big, ScanPlan.getComparedMarker(small, big, true)); - Assert.assertEquals(small, ScanPlan.getComparedMarker(big, small, false)); - Assert.assertEquals(small, ScanPlan.getComparedMarker(small, big, false)); - } - - /** - * 
Test ScanPlan AND operation - */ - @Test - public void testScanPlanAnd() { - ScanPlan l = new ScanPlan(); - ScanPlan r = new ScanPlan(); - l.setStartMarker("a", "int", "10", INCLUSIVE); - r.setStartMarker("a", "int", "10", INCLUSIVE); - - ScanPlan res; - // both equal - res = l.and(r).getPlans().get(0); - Assert.assertEquals(new ScanMarker("10", INCLUSIVE, "int"), res.markers.get("a").startMarker); - - // add equal end markers as well, and test AND again - l.setEndMarker("a", "int", "20", INCLUSIVE); - r.setEndMarker("a", "int", "20", INCLUSIVE); - res = l.and(r).getPlans().get(0); - Assert.assertEquals(new ScanMarker("10", INCLUSIVE, "int"), res.markers.get("a").startMarker); - Assert.assertEquals(new ScanMarker("20", INCLUSIVE, "int"), res.markers.get("a").endMarker); - - l.setStartMarker("a", "int", "10", !INCLUSIVE); - l.setEndMarker("a", "int", "20", INCLUSIVE); - - r.setStartMarker("a", "int", "10", INCLUSIVE); - r.setEndMarker("a", "int", "15", INCLUSIVE); - res = l.and(r).getPlans().get(0); - // start of l is greater, end of r is smaller - Assert.assertEquals(l.markers.get("a").startMarker, res.markers.get("a").startMarker); - Assert.assertEquals(r.markers.get("a").endMarker, res.markers.get("a").endMarker); - - } - - /** - * Test ScanPlan OR operation - */ - @Test - public void testScanPlanOr() { - ScanPlan l = new ScanPlan(); - ScanPlan r = new ScanPlan(); - l.setStartMarker("a", "int", "1", INCLUSIVE); - r.setStartMarker("a", "int", "11", INCLUSIVE); - - FilterPlan res1 = l.or(r); - Assert.assertEquals(2, res1.getPlans().size()); - res1.getPlans().get(0).markers.get("a").startMarker.equals(l.markers.get("a").startMarker); - res1.getPlans().get(1).markers.get("a").startMarker.equals(r.markers.get("a").startMarker); - - FilterPlan res2 = res1.or(r); - Assert.assertEquals(3, res2.getPlans().size()); - } - - /** - * Test MultiScanPlan OR - */ - @Test - public void testMultiScanPlanOr() { - - MultiScanPlan l = createMultiScanPlan(new ScanPlan()); - MultiScanPlan r = createMultiScanPlan(new ScanPlan()); - // verify OR of two multi plans with one plan each - Assert.assertEquals(2, l.or(r).getPlans().size()); - - // verify OR of multi plan with a single scanplan - Assert.assertEquals(2, l.or(new ScanPlan()).getPlans().size()); - Assert.assertEquals(2, (new ScanPlan()).or(l).getPlans().size()); - - // verify or of two multiplans with more than one scan plan - r = createMultiScanPlan(new ScanPlan(), new ScanPlan()); - Assert.assertEquals(3, l.or(r).getPlans().size()); - Assert.assertEquals(3, r.or(l).getPlans().size()); - - } - - private MultiScanPlan createMultiScanPlan(ScanPlan... scanPlans) { - return new MultiScanPlan(Arrays.asList(scanPlans)); - } - - /** - * Test MultiScanPlan AND - */ - @Test - public void testMultiScanPlanAnd() { - MultiScanPlan l = createMultiScanPlan(new ScanPlan()); - MultiScanPlan r = createMultiScanPlan(new ScanPlan()); - - // two MultiScanPlan with single scan plans should result in new FilterPlan - // with just one scan - Assert.assertEquals(1, l.and(r).getPlans().size()); - - // l has one ScanPlan, r has two. AND result should have two - r = createMultiScanPlan(new ScanPlan(), new ScanPlan()); - Assert.assertEquals(2, l.and(r).getPlans().size()); - Assert.assertEquals(2, r.and(l).getPlans().size()); - - // l has 2 ScanPlans, r has 3. 
AND result should have 6 - l = createMultiScanPlan(new ScanPlan(), new ScanPlan()); - r = createMultiScanPlan(new ScanPlan(), new ScanPlan(), new ScanPlan()); - Assert.assertEquals(6, l.and(r).getPlans().size()); - Assert.assertEquals(6, r.and(l).getPlans().size()); - } - - /** - * Test plan generation from LeafNode - * - * @throws MetaException - */ - @Test - public void testLeafNodePlan() throws MetaException { - - final String KEY = "k1"; - final String VAL = "v1"; - final String OTHERKEY = "k2"; - LeafNode l = new LeafNode(); - l.keyName = KEY; - l.value = VAL; - final ScanMarker DEFAULT_SCANMARKER = null; - List parts = new ArrayList(); - parts.add(new FieldSchema(KEY, "int", null)); - parts.add(new FieldSchema(OTHERKEY, "int", null)); - - l.operator = Operator.EQUALS; - verifyPlan(l, parts, KEY, new ScanMarker(VAL, INCLUSIVE, "int"), new ScanMarker(VAL, INCLUSIVE, "int")); - - l.operator = Operator.GREATERTHAN; - verifyPlan(l, parts, KEY, new ScanMarker(VAL, !INCLUSIVE, "int"), DEFAULT_SCANMARKER); - - l.operator = Operator.GREATERTHANOREQUALTO; - verifyPlan(l, parts, KEY, new ScanMarker(VAL, INCLUSIVE, "int"), DEFAULT_SCANMARKER); - - l.operator = Operator.LESSTHAN; - verifyPlan(l, parts, KEY, DEFAULT_SCANMARKER, new ScanMarker(VAL, !INCLUSIVE, "int")); - - l.operator = Operator.LESSTHANOREQUALTO; - verifyPlan(l, parts, KEY, DEFAULT_SCANMARKER, new ScanMarker(VAL, INCLUSIVE, "int")); - - // following leaf node plans should currently have true for 'has unsupported condition', - // because of the condition is not on first key - l.operator = Operator.EQUALS; - verifyPlan(l, parts, OTHERKEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, false); - - // if tree is null, it should return equivalent of full scan, and true - // for 'has unsupported condition' - verifyPlan(null, parts, KEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true); - - } - - private void verifyPlan(TreeNode l, List parts, String keyName, ScanMarker startMarker, ScanMarker endMarker) - throws MetaException { - verifyPlan(l, parts, keyName, startMarker, endMarker, false); - } - - private void verifyPlan(TreeNode l, List parts, String keyName, ScanMarker startMarker, ScanMarker endMarker, - boolean hasUnsupportedCondition) throws MetaException { - ExpressionTree e = null; - if (l != null) { - e = new ExpressionTree(); - e.setRootForTest(l); - } - PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts); - FilterPlan plan = planRes.plan; - Assert.assertEquals("Has unsupported condition", hasUnsupportedCondition, - planRes.hasUnsupportedCondition); - Assert.assertEquals(1, plan.getPlans().size()); - ScanPlan splan = plan.getPlans().get(0); - if (startMarker != null) { - Assert.assertEquals(startMarker, splan.markers.get(keyName).startMarker); - } else { - Assert.assertTrue(splan.markers.get(keyName)==null || - splan.markers.get(keyName).startMarker==null); - } - if (endMarker != null) { - Assert.assertEquals(endMarker, splan.markers.get(keyName).endMarker); - } else { - Assert.assertTrue(splan.markers.get(keyName)==null || - splan.markers.get(keyName).endMarker==null); - } - } - - /** - * Test plan generation from TreeNode - * - * @throws MetaException - */ - @Test - public void testTreeNodePlan() throws MetaException { - - final String KEY = "k1"; - final String VAL1 = "10"; - final String VAL2 = "11"; - LeafNode l = new LeafNode(); - l.keyName = KEY; - l.value = VAL1; - final ScanMarker DEFAULT_SCANMARKER = null; - - List parts = new ArrayList(); - parts.add(new FieldSchema("k1", "int", null)); - - LeafNode r = new 
LeafNode(); - r.keyName = KEY; - r.value = VAL2; - - TreeNode tn = new TreeNode(l, LogicalOperator.AND, r); - - // verify plan for - k1 >= '10' and k1 < '11' - l.operator = Operator.GREATERTHANOREQUALTO; - r.operator = Operator.LESSTHAN; - verifyPlan(tn, parts, KEY, new ScanMarker(VAL1, INCLUSIVE, "int"), new ScanMarker(VAL2, - !INCLUSIVE, "int")); - - // verify plan for - k1 >= '10' and k1 > '11' - l.operator = Operator.GREATERTHANOREQUALTO; - r.operator = Operator.GREATERTHAN; - verifyPlan(tn, parts, KEY, new ScanMarker(VAL2, !INCLUSIVE, "int"), DEFAULT_SCANMARKER); - - // verify plan for - k1 >= '10' or k1 > '11' - tn = new TreeNode(l, LogicalOperator.OR, r); - ExpressionTree e = new ExpressionTree(); - e.setRootForTest(tn); - PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts); - Assert.assertEquals(2, planRes.plan.getPlans().size()); - Assert.assertEquals(false, planRes.hasUnsupportedCondition); - - // verify plan for - k1 >= '10' and (k1 >= '10' or k1 > '11') - TreeNode tn2 = new TreeNode(l, LogicalOperator.AND, tn); - e = new ExpressionTree(); - e.setRootForTest(tn2); - planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts); - Assert.assertEquals(2, planRes.plan.getPlans().size()); - Assert.assertEquals(false, planRes.hasUnsupportedCondition); - - // verify plan for (k1 >= '10' and (k1 >= '10' or k1 > '11')) or k1 LIKE '2' - // plan should return true for hasUnsupportedCondition - LeafNode klike = new LeafNode(); - klike.keyName = KEY; - klike.value = VAL1; - klike.operator = Operator.LIKE; - TreeNode tn3 = new TreeNode(tn2, LogicalOperator.OR, klike); - e = new ExpressionTree(); - e.setRootForTest(tn3); - planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts); - Assert.assertEquals(3, planRes.plan.getPlans().size()); - Assert.assertEquals(false, planRes.hasUnsupportedCondition); - - - } - - @Test - public void testPartitionKeyScannerAllString() throws Exception { - List parts = new ArrayList(); - parts.add(new FieldSchema("year", "string", null)); - parts.add(new FieldSchema("month", "string", null)); - parts.add(new FieldSchema("state", "string", null)); - - // One prefix key and one minor key range - ExpressionTree exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and state = 'CA'").tree; - PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); - - Assert.assertEquals(planRes.plan.getPlans().size(), 1); - - ScanPlan sp = planRes.plan.getPlans().get(0); - byte[] startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); - byte[] endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); - RowFilter filter = (RowFilter)sp.getFilter(parts); - - // scan range contains the major key year, rowfilter contains minor key state - Assert.assertTrue(Bytes.contains(startRowSuffix, "2015".getBytes())); - Assert.assertTrue(Bytes.contains(endRowSuffix, "2015".getBytes())); - Assert.assertFalse(Bytes.contains(startRowSuffix, "CA".getBytes())); - Assert.assertFalse(Bytes.contains(endRowSuffix, "CA".getBytes())); - - PartitionKeyComparator comparator = (PartitionKeyComparator)filter.getComparator(); - Assert.assertEquals(comparator.ranges.size(), 1); - Assert.assertEquals(comparator.ranges.get(0).keyName, "state"); - - // Two prefix key and one LIKE operator - exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and month > 10 " - + "and month <= 11 and state like 'C%'").tree; - planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); - - Assert.assertEquals(planRes.plan.getPlans().size(), 1); - - sp = planRes.plan.getPlans().get(0); - 
startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); - endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); - filter = (RowFilter)sp.getFilter(parts); - - // scan range contains the major key value year/month, rowfilter contains LIKE operator - Assert.assertTrue(Bytes.contains(startRowSuffix, "2015".getBytes())); - Assert.assertTrue(Bytes.contains(endRowSuffix, "2015".getBytes())); - Assert.assertTrue(Bytes.contains(startRowSuffix, "10".getBytes())); - Assert.assertTrue(Bytes.contains(endRowSuffix, "11".getBytes())); - - comparator = (PartitionKeyComparator)filter.getComparator(); - Assert.assertEquals(comparator.ops.size(), 1); - Assert.assertEquals(comparator.ops.get(0).keyName, "state"); - - // One prefix key, one minor key range and one LIKE operator - exprTree = PartFilterExprUtil.getFilterParser("year >= 2014 and month > 10 " - + "and month <= 11 and state like 'C%'").tree; - planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); - - Assert.assertEquals(planRes.plan.getPlans().size(), 1); - - sp = planRes.plan.getPlans().get(0); - startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); - endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); - filter = (RowFilter)sp.getFilter(parts); - - // scan range contains the major key value year (low bound), rowfilter contains minor key state - // and LIKE operator - Assert.assertTrue(Bytes.contains(startRowSuffix, "2014".getBytes())); - - comparator = (PartitionKeyComparator)filter.getComparator(); - Assert.assertEquals(comparator.ranges.size(), 1); - Assert.assertEquals(comparator.ranges.get(0).keyName, "month"); - Assert.assertEquals(comparator.ops.size(), 1); - Assert.assertEquals(comparator.ops.get(0).keyName, "state"); - - // Condition contains or - exprTree = PartFilterExprUtil.getFilterParser("year = 2014 and (month > 10 " - + "or month < 3)").tree; - planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); - - sp = planRes.plan.getPlans().get(0); - startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); - endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); - filter = (RowFilter)sp.getFilter(parts); - - // The first ScanPlan contains year = 2014 and month > 10 - Assert.assertTrue(Bytes.contains(startRowSuffix, "2014".getBytes())); - Assert.assertTrue(Bytes.contains(endRowSuffix, "2014".getBytes())); - Assert.assertTrue(Bytes.contains(startRowSuffix, "10".getBytes())); - - sp = planRes.plan.getPlans().get(1); - startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); - endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); - filter = (RowFilter)sp.getFilter(parts); - - // The first ScanPlan contains year = 2014 and month < 3 - Assert.assertTrue(Bytes.contains(startRowSuffix, "2014".getBytes())); - Assert.assertTrue(Bytes.contains(endRowSuffix, "2014".getBytes())); - Assert.assertTrue(Bytes.contains(endRowSuffix, "3".getBytes())); - } - - @Test - public void testPartitionKeyScannerMixedType() throws Exception { - List parts = new ArrayList(); - parts.add(new FieldSchema("year", "int", null)); - parts.add(new FieldSchema("month", "int", null)); - parts.add(new FieldSchema("state", "string", null)); - - // One prefix key and one minor key range - ExpressionTree exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and state = 'CA'").tree; - PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); - - Assert.assertEquals(planRes.plan.getPlans().size(), 1); - - ScanPlan sp = planRes.plan.getPlans().get(0); - byte[] startRowSuffix = 
sp.getStartRowSuffix("testdb", "testtb", parts); - byte[] endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); - RowFilter filter = (RowFilter)sp.getFilter(parts); - - // scan range contains the major key year, rowfilter contains minor key state - Assert.assertTrue(Bytes.contains(startRowSuffix, Shorts.toByteArray((short)2015))); - Assert.assertTrue(Bytes.contains(endRowSuffix, Shorts.toByteArray((short)2016))); - - PartitionKeyComparator comparator = (PartitionKeyComparator)filter.getComparator(); - Assert.assertEquals(comparator.ranges.size(), 1); - Assert.assertEquals(comparator.ranges.get(0).keyName, "state"); - } -} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java deleted file mode 100644 index 4aa8c34..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java +++ /dev/null @@ -1,1978 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import java.io.IOException; -import java.security.MessageDigest; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.SortedMap; -import java.util.TreeMap; - -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.Decimal; -import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.FunctionType; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.ResourceType; -import org.apache.hadoop.hive.metastore.api.ResourceUri; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Table; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * - */ -public class TestHBaseStore { - private static final Logger LOG = LoggerFactory.getLogger(TestHBaseStore.class.getName()); - static Map emptyParameters = new HashMap(); - // Table with NUM_PART_KEYS partitioning keys and NUM_PARTITIONS values per key - static final int NUM_PART_KEYS = 1; - static final int NUM_PARTITIONS = 5; - static final String DB = "db"; - static final String TBL = "tbl"; - static final String COL = "col"; - static final String PART_KEY_PREFIX = "part"; - static final String PART_VAL_PREFIX = "val"; - static final String PART_KV_SEPARATOR = "="; - static final List PART_KEYS = new ArrayList(); - static final List PART_VALS = new ArrayList(); - // Initialize mock partitions - static { - for (int i = 1; i <= NUM_PART_KEYS; i++) { - 
PART_KEYS.add(PART_KEY_PREFIX + i); - } - for (int i = 1; i <= NUM_PARTITIONS; i++) { - PART_VALS.add(PART_VAL_PREFIX + i); - } - } - static final long DEFAULT_TIME = System.currentTimeMillis(); - static final String PART_KEY = "part"; - static final String BOOLEAN_COL = "boolCol"; - static final String BOOLEAN_TYPE = "boolean"; - static final String BOOLEAN_VAL = "true"; - static final String LONG_COL = "longCol"; - static final String LONG_TYPE = "long"; - static final String INT_TYPE = "int"; - static final String INT_VAL = "1234"; - static final String DOUBLE_COL = "doubleCol"; - static final String DOUBLE_TYPE = "double"; - static final String DOUBLE_VAL = "3.1415"; - static final String STRING_COL = "stringCol"; - static final String STRING_TYPE = "string"; - static final String STRING_VAL = "stringval"; - static final String BINARY_COL = "binaryCol"; - static final String BINARY_TYPE = "binary"; - static final String BINARY_VAL = "1"; - static final String DECIMAL_COL = "decimalCol"; - static final String DECIMAL_TYPE = "decimal(5,3)"; - static final String DECIMAL_VAL = "12.123"; - static List booleanColStatsObjs = new ArrayList( - NUM_PARTITIONS); - static List longColStatsObjs = new ArrayList( - NUM_PARTITIONS); - static List doubleColStatsObjs = new ArrayList( - NUM_PARTITIONS); - static List stringColStatsObjs = new ArrayList( - NUM_PARTITIONS); - static List binaryColStatsObjs = new ArrayList( - NUM_PARTITIONS); - static List decimalColStatsObjs = new ArrayList( - NUM_PARTITIONS); - - @Rule public ExpectedException thrown = ExpectedException.none(); - @Mock HTableInterface htable; - SortedMap rows = new TreeMap<>(); - HBaseStore store; - - - @BeforeClass - public static void beforeTest() { - // All data intitializations - populateMockStats(); - } - - private static void populateMockStats() { - ColumnStatisticsObj statsObj; - // Add NUM_PARTITIONS ColumnStatisticsObj of each type - // For aggregate stats test, we'll treat each ColumnStatisticsObj as stats for 1 partition - // For the rest, we'll just pick the 1st ColumnStatisticsObj from this list and use it - for (int i = 0; i < NUM_PARTITIONS; i++) { - statsObj = mockBooleanStats(i); - booleanColStatsObjs.add(statsObj); - statsObj = mockLongStats(i); - longColStatsObjs.add(statsObj); - statsObj = mockDoubleStats(i); - doubleColStatsObjs.add(statsObj); - statsObj = mockStringStats(i); - stringColStatsObjs.add(statsObj); - statsObj = mockBinaryStats(i); - binaryColStatsObjs.add(statsObj); - statsObj = mockDecimalStats(i); - decimalColStatsObjs.add(statsObj); - } - } - - private static ColumnStatisticsObj mockBooleanStats(int i) { - long trues = 37 + 100*i; - long falses = 12 + 50*i; - long nulls = 2 + i; - ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); - colStatsObj.setColName(BOOLEAN_COL); - colStatsObj.setColType(BOOLEAN_TYPE); - ColumnStatisticsData data = new ColumnStatisticsData(); - BooleanColumnStatsData boolData = new BooleanColumnStatsData(); - boolData.setNumTrues(trues); - boolData.setNumFalses(falses); - boolData.setNumNulls(nulls); - data.setBooleanStats(boolData); - colStatsObj.setStatsData(data); - return colStatsObj; - } - - private static ColumnStatisticsObj mockLongStats(int i) { - long high = 120938479124L + 100*i; - long low = -12341243213412124L - 50*i; - long nulls = 23 + i; - long dVs = 213L + 10*i; - ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); - colStatsObj.setColName(LONG_COL); - colStatsObj.setColType(LONG_TYPE); - ColumnStatisticsData data = new ColumnStatisticsData(); - 
LongColumnStatsData longData = new LongColumnStatsData(); - longData.setHighValue(high); - longData.setLowValue(low); - longData.setNumNulls(nulls); - longData.setNumDVs(dVs); - data.setLongStats(longData); - colStatsObj.setStatsData(data); - return colStatsObj; - } - - private static ColumnStatisticsObj mockDoubleStats(int i) { - double high = 123423.23423 + 100*i; - double low = 0.00001234233 - 50*i; - long nulls = 92 + i; - long dVs = 1234123421L + 10*i; - ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); - colStatsObj.setColName(DOUBLE_COL); - colStatsObj.setColType(DOUBLE_TYPE); - ColumnStatisticsData data = new ColumnStatisticsData(); - DoubleColumnStatsData doubleData = new DoubleColumnStatsData(); - doubleData.setHighValue(high); - doubleData.setLowValue(low); - doubleData.setNumNulls(nulls); - doubleData.setNumDVs(dVs); - data.setDoubleStats(doubleData); - colStatsObj.setStatsData(data); - return colStatsObj; - } - - private static ColumnStatisticsObj mockStringStats(int i) { - long maxLen = 1234 + 10*i; - double avgLen = 32.3 + i; - long nulls = 987 + 10*i; - long dVs = 906 + i; - ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); - colStatsObj.setColName(STRING_COL); - colStatsObj.setColType(STRING_TYPE); - ColumnStatisticsData data = new ColumnStatisticsData(); - StringColumnStatsData stringData = new StringColumnStatsData(); - stringData.setMaxColLen(maxLen); - stringData.setAvgColLen(avgLen); - stringData.setNumNulls(nulls); - stringData.setNumDVs(dVs); - data.setStringStats(stringData); - colStatsObj.setStatsData(data); - return colStatsObj; - } - - private static ColumnStatisticsObj mockBinaryStats(int i) {; - long maxLen = 123412987L + 10*i; - double avgLen = 76.98 + i; - long nulls = 976998797L + 10*i; - ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); - colStatsObj.setColName(BINARY_COL); - colStatsObj.setColType(BINARY_TYPE); - ColumnStatisticsData data = new ColumnStatisticsData(); - BinaryColumnStatsData binaryData = new BinaryColumnStatsData(); - binaryData.setMaxColLen(maxLen); - binaryData.setAvgColLen(avgLen); - binaryData.setNumNulls(nulls); - data.setBinaryStats(binaryData); - colStatsObj.setStatsData(data); - return colStatsObj; - } - - private static ColumnStatisticsObj mockDecimalStats(int i) { - Decimal high = new Decimal(); - high.setScale((short)3); - String strHigh = String.valueOf(3876 + 100*i); - high.setUnscaled(strHigh.getBytes()); - Decimal low = new Decimal(); - low.setScale((short)3); - String strLow = String.valueOf(38 + i); - low.setUnscaled(strLow.getBytes()); - long nulls = 13 + i; - long dVs = 923947293L + 100*i; - ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); - colStatsObj.setColName(DECIMAL_COL); - colStatsObj.setColType(DECIMAL_TYPE); - ColumnStatisticsData data = new ColumnStatisticsData(); - DecimalColumnStatsData decimalData = new DecimalColumnStatsData(); - decimalData.setHighValue(high); - decimalData.setLowValue(low); - decimalData.setNumNulls(nulls); - decimalData.setNumDVs(dVs); - data.setDecimalStats(decimalData); - colStatsObj.setStatsData(data); - return colStatsObj; - } - - @AfterClass - public static void afterTest() { - } - - - @Before - public void init() throws IOException { - MockitoAnnotations.initMocks(this); - HiveConf conf = new HiveConf(); - conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); - store = MockUtils.init(conf, htable, rows); - } - - @Test - public void createDb() throws Exception { - String dbname = "mydb"; - Database db = new Database(dbname, "no 
description", "file:///tmp", emptyParameters); - store.createDatabase(db); - - Database d = store.getDatabase(dbname); - Assert.assertEquals(dbname, d.getName()); - Assert.assertEquals("no description", d.getDescription()); - Assert.assertEquals("file:///tmp", d.getLocationUri()); - } - - @Test - public void alterDb() throws Exception { - String dbname = "mydb"; - Database db = new Database(dbname, "no description", "file:///tmp", emptyParameters); - store.createDatabase(db); - db.setDescription("a description"); - store.alterDatabase(dbname, db); - - Database d = store.getDatabase(dbname); - Assert.assertEquals(dbname, d.getName()); - Assert.assertEquals("a description", d.getDescription()); - Assert.assertEquals("file:///tmp", d.getLocationUri()); - } - - @Test - public void dropDb() throws Exception { - String dbname = "anotherdb"; - Database db = new Database(dbname, "no description", "file:///tmp", emptyParameters); - store.createDatabase(db); - - Database d = store.getDatabase(dbname); - Assert.assertNotNull(d); - - store.dropDatabase(dbname); - thrown.expect(NoSuchObjectException.class); - store.getDatabase(dbname); - } - - @Test - public void createFunction() throws Exception { - String funcName = "createfunc"; - int now = (int)(System.currentTimeMillis()/ 1000); - Function func = new Function(funcName, DB, "o.a.h.h.myfunc", "me", PrincipalType.USER, - now, FunctionType.JAVA, Arrays.asList(new ResourceUri(ResourceType.JAR, - "file:/tmp/somewhere"))); - store.createFunction(func); - - Function f = store.getFunction(DB, funcName); - Assert.assertEquals(DB, f.getDbName()); - Assert.assertEquals(funcName, f.getFunctionName()); - Assert.assertEquals("o.a.h.h.myfunc", f.getClassName()); - Assert.assertEquals("me", f.getOwnerName()); - Assert.assertEquals(PrincipalType.USER, f.getOwnerType()); - Assert.assertTrue(now <= f.getCreateTime()); - Assert.assertEquals(FunctionType.JAVA, f.getFunctionType()); - Assert.assertEquals(1, f.getResourceUrisSize()); - Assert.assertEquals(ResourceType.JAR, f.getResourceUris().get(0).getResourceType()); - Assert.assertEquals("file:/tmp/somewhere", f.getResourceUris().get(0).getUri()); - } - - @Test - public void alterFunction() throws Exception { - String funcName = "alterfunc"; - int now = (int)(System.currentTimeMillis()/ 1000); - List uris = new ArrayList(); - uris.add(new ResourceUri(ResourceType.FILE, "whatever")); - Function func = new Function(funcName, DB, "o.a.h.h.myfunc", "me", PrincipalType.USER, - now, FunctionType.JAVA, uris); - store.createFunction(func); - - Function f = store.getFunction(DB, funcName); - Assert.assertEquals(ResourceType.FILE, f.getResourceUris().get(0).getResourceType()); - - func.addToResourceUris(new ResourceUri(ResourceType.ARCHIVE, "file")); - store.alterFunction(DB, funcName, func); - - f = store.getFunction(DB, funcName); - Assert.assertEquals(2, f.getResourceUrisSize()); - Assert.assertEquals(ResourceType.FILE, f.getResourceUris().get(0).getResourceType()); - Assert.assertEquals(ResourceType.ARCHIVE, f.getResourceUris().get(1).getResourceType()); - - } - - @Test - public void dropFunction() throws Exception { - String funcName = "delfunc"; - int now = (int)(System.currentTimeMillis()/ 1000); - Function func = new Function(funcName, DB, "o.a.h.h.myfunc", "me", PrincipalType.USER, - now, FunctionType.JAVA, Arrays.asList(new ResourceUri(ResourceType.JAR, "file:/tmp/somewhere"))); - store.createFunction(func); - - Function f = store.getFunction(DB, funcName); - Assert.assertNotNull(f); - - store.dropFunction(DB, 
funcName); - //thrown.expect(NoSuchObjectException.class); - Assert.assertNull(store.getFunction(DB, funcName)); - } - - @Test - public void createTable() throws Exception { - String tableName = "mytable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - Map params = new HashMap(); - params.put("key", "value"); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, - serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); - Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - Table t = store.getTable("default", tableName); - Assert.assertEquals(1, t.getSd().getColsSize()); - Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); - Assert.assertEquals("", t.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); - Assert.assertEquals("file:/tmp", t.getSd().getLocation()); - Assert.assertEquals("input", t.getSd().getInputFormat()); - Assert.assertEquals("output", t.getSd().getOutputFormat()); - Assert.assertFalse(t.getSd().isCompressed()); - Assert.assertEquals(17, t.getSd().getNumBuckets()); - Assert.assertEquals(1, t.getSd().getBucketColsSize()); - Assert.assertEquals("bucketcol", t.getSd().getBucketCols().get(0)); - Assert.assertEquals(1, t.getSd().getSortColsSize()); - Assert.assertEquals("sortcol", t.getSd().getSortCols().get(0).getCol()); - Assert.assertEquals(1, t.getSd().getSortCols().get(0).getOrder()); - Assert.assertEquals(1, t.getSd().getParametersSize()); - Assert.assertEquals("value", t.getSd().getParameters().get("key")); - Assert.assertEquals("me", t.getOwner()); - Assert.assertEquals("default", t.getDbName()); - Assert.assertEquals(tableName, t.getTableName()); - Assert.assertEquals(0, t.getParametersSize()); - } - - @Test - public void skewInfo() throws Exception { - String tableName = "mytable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", true, 0, - serde, null, null, emptyParameters); - - Map, String> map = new HashMap, String>(); - map.put(Arrays.asList("col3"), "col4"); - SkewedInfo skew = new SkewedInfo(Arrays.asList("col1"), Arrays.asList(Arrays.asList("col2")), - map); - sd.setSkewedInfo(skew); - Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - Table t = store.getTable("default", tableName); - Assert.assertEquals(1, t.getSd().getColsSize()); - Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); - Assert.assertEquals("", t.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); - Assert.assertEquals("file:/tmp", t.getSd().getLocation()); - Assert.assertEquals("input", 
t.getSd().getInputFormat()); - Assert.assertEquals("output", t.getSd().getOutputFormat()); - Assert.assertTrue(t.getSd().isCompressed()); - Assert.assertEquals(0, t.getSd().getNumBuckets()); - Assert.assertEquals(0, t.getSd().getSortColsSize()); - Assert.assertEquals("me", t.getOwner()); - Assert.assertEquals("default", t.getDbName()); - Assert.assertEquals(tableName, t.getTableName()); - Assert.assertEquals(0, t.getParametersSize()); - - skew = t.getSd().getSkewedInfo(); - Assert.assertNotNull(skew); - Assert.assertEquals(1, skew.getSkewedColNamesSize()); - Assert.assertEquals("col1", skew.getSkewedColNames().get(0)); - Assert.assertEquals(1, skew.getSkewedColValuesSize()); - Assert.assertEquals("col2", skew.getSkewedColValues().get(0).get(0)); - Assert.assertEquals(1, skew.getSkewedColValueLocationMapsSize()); - Assert.assertEquals("col4", skew.getSkewedColValueLocationMaps().get(Arrays.asList("col3"))); - - } - - @Test - public void hashSd() throws Exception { - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", true, 0, - serde, null, null, emptyParameters); - - Map, String> map = new HashMap, String>(); - map.put(Arrays.asList("col3"), "col4"); - SkewedInfo skew = new SkewedInfo(Arrays.asList("col1"), Arrays.asList(Arrays.asList("col2")), - map); - sd.setSkewedInfo(skew); - - MessageDigest md = MessageDigest.getInstance("MD5"); - byte[] baseHash = HBaseUtils.hashStorageDescriptor(sd, md); - - StorageDescriptor changeSchema = new StorageDescriptor(sd); - changeSchema.getCols().add(new FieldSchema("col2", "varchar(32)", "a comment")); - byte[] schemaHash = HBaseUtils.hashStorageDescriptor(changeSchema, md); - Assert.assertFalse(Arrays.equals(baseHash, schemaHash)); - - StorageDescriptor changeLocation = new StorageDescriptor(sd); - changeLocation.setLocation("file:/somewhere/else"); - byte[] locationHash = HBaseUtils.hashStorageDescriptor(changeLocation, md); - Assert.assertArrayEquals(baseHash, locationHash); - } - - @Test - public void alterTable() throws Exception { - String tableName = "alttable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - startTime += 10; - table.setLastAccessTime(startTime); - store.alterTable("default", tableName, table); - - Table t = store.getTable("default", tableName); - Assert.assertEquals(1, t.getSd().getColsSize()); - Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); - Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); - Assert.assertEquals("file:/tmp", t.getSd().getLocation()); - Assert.assertEquals("input", t.getSd().getInputFormat()); - Assert.assertEquals("output", t.getSd().getOutputFormat()); - Assert.assertEquals("me", t.getOwner()); - Assert.assertEquals("default", 
t.getDbName()); - Assert.assertEquals(tableName, t.getTableName()); - Assert.assertEquals(startTime, t.getLastAccessTime()); - } - - @Test - public void dropTable() throws Exception { - String tableName = "dtable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - Table t = store.getTable("default", tableName); - Assert.assertNotNull(t); - - store.dropTable("default", tableName); - Assert.assertNull(store.getTable("default", tableName)); - } - - @Test - public void createPartition() throws Exception { - String tableName = "myparttable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List vals = Arrays.asList("fred"); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=fred"); - Partition part = new Partition(vals, DB, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - - Partition p = store.getPartition(DB, tableName, vals); - Assert.assertEquals(1, p.getSd().getColsSize()); - Assert.assertEquals("col1", p.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", p.getSd().getCols().get(0).getType()); - Assert.assertEquals("nocomment", p.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", p.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", p.getSd().getSerdeInfo().getSerializationLib()); - Assert.assertEquals("file:/tmp/pc=fred", p.getSd().getLocation()); - Assert.assertEquals("input", p.getSd().getInputFormat()); - Assert.assertEquals("output", p.getSd().getOutputFormat()); - Assert.assertEquals(DB, p.getDbName()); - Assert.assertEquals(tableName, p.getTableName()); - Assert.assertEquals(1, p.getValuesSize()); - Assert.assertEquals("fred", p.getValues().get(0)); - - Assert.assertTrue(store.doesPartitionExist(DB, tableName, vals)); - Assert.assertFalse(store.doesPartitionExist(DB, tableName, Arrays.asList("bob"))); - } - - @Test - public void alterPartition() throws Exception { - String tableName = "alterparttable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List 
vals = Arrays.asList("fred"); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=fred"); - Partition part = new Partition(vals, DB, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - - part.setLastAccessTime(startTime + 10); - store.alterPartition(DB, tableName, vals, part); - - Partition p = store.getPartition(DB, tableName, vals); - Assert.assertEquals(1, p.getSd().getColsSize()); - Assert.assertEquals("col1", p.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", p.getSd().getCols().get(0).getType()); - Assert.assertEquals("nocomment", p.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", p.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", p.getSd().getSerdeInfo().getSerializationLib()); - Assert.assertEquals("file:/tmp/pc=fred", p.getSd().getLocation()); - Assert.assertEquals("input", p.getSd().getInputFormat()); - Assert.assertEquals("output", p.getSd().getOutputFormat()); - Assert.assertEquals(DB, p.getDbName()); - Assert.assertEquals(tableName, p.getTableName()); - Assert.assertEquals(1, p.getValuesSize()); - Assert.assertEquals("fred", p.getValues().get(0)); - Assert.assertEquals(startTime + 10, p.getLastAccessTime()); - - Assert.assertTrue(store.doesPartitionExist(DB, tableName, vals)); - Assert.assertFalse(store.doesPartitionExist(DB, tableName, Arrays.asList("bob"))); - } - - @Test - public void getPartitions() throws Exception { - String tableName = "manyParts"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); - for (String val : partVals) { - List vals = new ArrayList(); - vals.add(val); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=" + val); - Partition part = new Partition(vals, DB, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - - Partition p = store.getPartition(DB, tableName, vals); - Assert.assertEquals("file:/tmp/pc=" + val, p.getSd().getLocation()); - } - - List parts = store.getPartitions(DB, tableName, -1); - Assert.assertEquals(5, parts.size()); - String[] pv = new String[5]; - for (int i = 0; i < 5; i++) pv[i] = parts.get(i).getValues().get(0); - Arrays.sort(pv); - Assert.assertArrayEquals(pv, partVals.toArray(new String[5])); - } - - @Test - public void listGetDropPartitionNames() throws Exception { - String tableName = "listParts"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - partCols.add(new FieldSchema("region", "string", "")); - Table table = new Table(tableName, DB, "me", 
startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; - for (String[] pv : partVals) { - List vals = new ArrayList(); - for (String v : pv) vals.add(v); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=" + pv[0] + "/region=" + pv[1]); - Partition part = new Partition(vals, DB, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - } - - List names = store.listPartitionNames(DB, tableName, (short) -1); - Assert.assertEquals(2, names.size()); - String[] resultNames = names.toArray(new String[names.size()]); - Arrays.sort(resultNames); - Assert.assertArrayEquals(resultNames, new String[]{"pc=today/region=north america", - "pc=tomorrow/region=europe"}); - - List parts = store.getPartitionsByNames(DB, tableName, names); - Assert.assertArrayEquals(partVals[0], parts.get(0).getValues().toArray(new String[2])); - Assert.assertArrayEquals(partVals[1], parts.get(1).getValues().toArray(new String[2])); - - store.dropPartitions(DB, tableName, names); - List afterDropParts = store.getPartitions(DB, tableName, -1); - Assert.assertEquals(0, afterDropParts.size()); - } - - - @Test - public void dropPartition() throws Exception { - String tableName = "myparttable2"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List vals = Arrays.asList("fred"); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=fred"); - Partition part = new Partition(vals, DB, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - - Assert.assertNotNull(store.getPartition(DB, tableName, vals)); - store.dropPartition(DB, tableName, vals); - thrown.expect(NoSuchObjectException.class); - store.getPartition(DB, tableName, vals); - } - - @Test - public void createIndex() throws Exception { - String tableName = "mytable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - Map params = new HashMap(); - params.put("key", "value"); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, - serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); - Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - String indexName = "myindex"; - String indexTableName = tableName + "__" + indexName + "__"; - Index index = new Index(indexName, null, "default", tableName, startTime, startTime, - indexTableName, sd, emptyParameters, false); - store.addIndex(index); - - Index ind = store.getIndex("default", tableName, indexName); - Assert.assertEquals(1, ind.getSd().getColsSize()); - Assert.assertEquals("col1", 
ind.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", ind.getSd().getCols().get(0).getType()); - Assert.assertEquals("", ind.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", ind.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", ind.getSd().getSerdeInfo().getSerializationLib()); - Assert.assertEquals("file:/tmp", ind.getSd().getLocation()); - Assert.assertEquals("input", ind.getSd().getInputFormat()); - Assert.assertEquals("output", ind.getSd().getOutputFormat()); - Assert.assertFalse(ind.getSd().isCompressed()); - Assert.assertEquals(17, ind.getSd().getNumBuckets()); - Assert.assertEquals(1, ind.getSd().getBucketColsSize()); - Assert.assertEquals("bucketcol", ind.getSd().getBucketCols().get(0)); - Assert.assertEquals(1, ind.getSd().getSortColsSize()); - Assert.assertEquals("sortcol", ind.getSd().getSortCols().get(0).getCol()); - Assert.assertEquals(1, ind.getSd().getSortCols().get(0).getOrder()); - Assert.assertEquals(1, ind.getSd().getParametersSize()); - Assert.assertEquals("value", ind.getSd().getParameters().get("key")); - Assert.assertEquals(indexName, ind.getIndexName()); - Assert.assertNull(ind.getIndexHandlerClass()); - Assert.assertEquals("default", ind.getDbName()); - Assert.assertEquals(tableName, ind.getOrigTableName()); - Assert.assertEquals(0, ind.getParametersSize()); - Assert.assertEquals(startTime, ind.getCreateTime()); - Assert.assertEquals(startTime, ind.getLastAccessTime()); - Assert.assertEquals(false, ind.isDeferredRebuild()); - } - - @Test - public void alterIndex() throws Exception { - String tableName = "mytable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - Map params = new HashMap(); - params.put("key", "value"); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, - serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); - Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - String indexName = "myindex"; - Index index = new Index(indexName, null, "default", tableName, startTime, startTime, - tableName + "__" + indexName + "__", sd, emptyParameters, false); - store.addIndex(index); - - startTime += 10; - index.setLastAccessTime(startTime); - store.alterIndex("default", tableName, indexName, index); - - Index ind = store.getIndex("default", tableName, indexName); - Assert.assertEquals(1, ind.getSd().getColsSize()); - Assert.assertEquals("col1", ind.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", ind.getSd().getCols().get(0).getType()); - Assert.assertEquals("", ind.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", ind.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", ind.getSd().getSerdeInfo().getSerializationLib()); - Assert.assertEquals("file:/tmp", ind.getSd().getLocation()); - Assert.assertEquals("input", ind.getSd().getInputFormat()); - Assert.assertEquals("output", ind.getSd().getOutputFormat()); - Assert.assertFalse(ind.getSd().isCompressed()); - Assert.assertEquals(17, ind.getSd().getNumBuckets()); - Assert.assertEquals(1, ind.getSd().getBucketColsSize()); - Assert.assertEquals("bucketcol", ind.getSd().getBucketCols().get(0)); - Assert.assertEquals(1, ind.getSd().getSortColsSize()); - 
Assert.assertEquals("sortcol", ind.getSd().getSortCols().get(0).getCol()); - Assert.assertEquals(1, ind.getSd().getSortCols().get(0).getOrder()); - Assert.assertEquals(1, ind.getSd().getParametersSize()); - Assert.assertEquals("value", ind.getSd().getParameters().get("key")); - Assert.assertEquals(indexName, ind.getIndexName()); - Assert.assertNull(ind.getIndexHandlerClass()); - Assert.assertEquals("default", ind.getDbName()); - Assert.assertEquals(tableName, ind.getOrigTableName()); - Assert.assertEquals(0, ind.getParametersSize()); - Assert.assertEquals(startTime, ind.getLastAccessTime()); - Assert.assertEquals(false, ind.isDeferredRebuild()); - } - - @Test - public void dropIndex() throws Exception { - String tableName = "mytable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - Map params = new HashMap(); - params.put("key", "value"); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, - serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); - Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - String indexName = "myindex"; - Index index = new Index(indexName, null, "default", tableName, startTime, startTime, - tableName + "__" + indexName + "__", sd, emptyParameters, false); - store.addIndex(index); - - store.dropIndex("default", tableName, indexName); - - } - - @Test - public void createRole() throws Exception { - int now = (int)System.currentTimeMillis()/1000; - String roleName = "myrole"; - store.addRole(roleName, "me"); - - Role r = store.getRole(roleName); - Assert.assertEquals(roleName, r.getRoleName()); - Assert.assertEquals("me", r.getOwnerName()); - Assert.assertTrue(now <= r.getCreateTime()); - } - - @Test - public void dropRole() throws Exception { - String roleName = "anotherrole"; - store.addRole(roleName, "me"); - - Role role = store.getRole(roleName); - Assert.assertNotNull(role); - - store.removeRole(roleName); - thrown.expect(NoSuchObjectException.class); - store.getRole(roleName); - } - - // Due to the way our mock stuff works, we can only insert one column at a time, so we'll test - // each stat type separately. We'll test them together in the integration tests. - @Test - public void booleanTableStatistics() throws Exception { - // Add a boolean table stats for BOOLEAN_COL to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for table level stats - ColumnStatisticsDesc desc = getMockTblColStatsDesc(); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = booleanColStatsObjs.get(0); - BooleanColumnStatsData boolData = obj.getStatsData().getBooleanStats(); - // Add to DB - stats.addToStatsObj(obj); - store.updateTableColumnStatistics(stats); - // Get from DB - ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(BOOLEAN_COL)); - // Compare ColumnStatisticsDesc - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); - Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.BOOLEAN_STATS, dataFromDB.getSetField()); - // Compare BooleanColumnStatsData - BooleanColumnStatsData boolDataFromDB = dataFromDB.getBooleanStats(); - Assert.assertEquals(boolData.getNumTrues(), boolDataFromDB.getNumTrues()); - Assert.assertEquals(boolData.getNumFalses(), boolDataFromDB.getNumFalses()); - Assert.assertEquals(boolData.getNumNulls(), boolDataFromDB.getNumNulls()); - } - - @Test - public void longTableStatistics() throws Exception { - createMockTable(LONG_TYPE); - // Add a long table stats for LONG_COL to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for table level stats - ColumnStatisticsDesc desc = getMockTblColStatsDesc(); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = longColStatsObjs.get(0); - LongColumnStatsData longData = obj.getStatsData().getLongStats(); - // Add to DB - stats.addToStatsObj(obj); - store.updateTableColumnStatistics(stats); - // Get from DB - ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(LONG_COL)); - // Compare ColumnStatisticsDesc - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); - Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.LONG_STATS, dataFromDB.getSetField()); - // Compare LongColumnStatsData - LongColumnStatsData longDataFromDB = dataFromDB.getLongStats(); - Assert.assertEquals(longData.getHighValue(), longDataFromDB.getHighValue()); - Assert.assertEquals(longData.getLowValue(), longDataFromDB.getLowValue()); - Assert.assertEquals(longData.getNumNulls(), longDataFromDB.getNumNulls()); - Assert.assertEquals(longData.getNumDVs(), longDataFromDB.getNumDVs()); - } - - @Test - public void doubleTableStatistics() throws Exception { - createMockTable(DOUBLE_TYPE); - // Add a double table stats for DOUBLE_COL to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for table level stats - ColumnStatisticsDesc desc = getMockTblColStatsDesc(); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = doubleColStatsObjs.get(0); - DoubleColumnStatsData doubleData = obj.getStatsData().getDoubleStats(); - // Add to DB - stats.addToStatsObj(obj); - store.updateTableColumnStatistics(stats); - // Get from DB - ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(DOUBLE_COL)); - // Compare ColumnStatisticsDesc - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); - Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.DOUBLE_STATS, dataFromDB.getSetField()); - // Compare DoubleColumnStatsData - DoubleColumnStatsData doubleDataFromDB = dataFromDB.getDoubleStats(); - Assert.assertEquals(doubleData.getHighValue(), doubleDataFromDB.getHighValue(), 0.01); - Assert.assertEquals(doubleData.getLowValue(), doubleDataFromDB.getLowValue(), 0.01); - Assert.assertEquals(doubleData.getNumNulls(), doubleDataFromDB.getNumNulls()); - Assert.assertEquals(doubleData.getNumDVs(), doubleDataFromDB.getNumDVs()); - } - - @Test - public void stringTableStatistics() throws Exception { - createMockTable(STRING_TYPE); - // Add a string table stats for STRING_COL to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for table level stats - ColumnStatisticsDesc desc = getMockTblColStatsDesc(); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = stringColStatsObjs.get(0); - StringColumnStatsData stringData = obj.getStatsData().getStringStats(); - // Add to DB - stats.addToStatsObj(obj); - store.updateTableColumnStatistics(stats); - // Get from DB - ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(STRING_COL)); - // Compare ColumnStatisticsDesc - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); - Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.STRING_STATS, dataFromDB.getSetField()); - // Compare StringColumnStatsData - StringColumnStatsData stringDataFromDB = dataFromDB.getStringStats(); - Assert.assertEquals(stringData.getMaxColLen(), stringDataFromDB.getMaxColLen()); - Assert.assertEquals(stringData.getAvgColLen(), stringDataFromDB.getAvgColLen(), 0.01); - Assert.assertEquals(stringData.getNumNulls(), stringDataFromDB.getNumNulls()); - Assert.assertEquals(stringData.getNumDVs(), stringDataFromDB.getNumDVs()); - } - - @Test - public void binaryTableStatistics() throws Exception { - createMockTable(BINARY_TYPE); - // Add a binary table stats for BINARY_COL to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for table level stats - ColumnStatisticsDesc desc = getMockTblColStatsDesc(); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = binaryColStatsObjs.get(0); - BinaryColumnStatsData binaryData = obj.getStatsData().getBinaryStats(); - // Add to DB - stats.addToStatsObj(obj); - store.updateTableColumnStatistics(stats); - // Get from DB - ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(BINARY_COL)); - // Compare ColumnStatisticsDesc - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); - Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.BINARY_STATS, dataFromDB.getSetField()); - // Compare BinaryColumnStatsData - BinaryColumnStatsData binaryDataFromDB = dataFromDB.getBinaryStats(); - Assert.assertEquals(binaryData.getMaxColLen(), binaryDataFromDB.getMaxColLen()); - Assert.assertEquals(binaryData.getAvgColLen(), binaryDataFromDB.getAvgColLen(), 0.01); - Assert.assertEquals(binaryData.getNumNulls(), binaryDataFromDB.getNumNulls()); - } - - @Test - public void decimalTableStatistics() throws Exception { - createMockTable(DECIMAL_TYPE); - // Add a decimal table stats for DECIMAL_COL to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for table level stats - ColumnStatisticsDesc desc = getMockTblColStatsDesc(); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = decimalColStatsObjs.get(0); - DecimalColumnStatsData decimalData = obj.getStatsData().getDecimalStats(); - // Add to DB - stats.addToStatsObj(obj); - store.updateTableColumnStatistics(stats); - // Get from DB - ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(DECIMAL_COL)); - // Compare ColumnStatisticsDesc - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); - Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.DECIMAL_STATS, dataFromDB.getSetField()); - // Compare DecimalColumnStatsData - DecimalColumnStatsData decimalDataFromDB = dataFromDB.getDecimalStats(); - Assert.assertEquals(decimalData.getHighValue(), decimalDataFromDB.getHighValue()); - Assert.assertEquals(decimalData.getLowValue(), decimalDataFromDB.getLowValue()); - Assert.assertEquals(decimalData.getNumNulls(), decimalDataFromDB.getNumNulls()); - Assert.assertEquals(decimalData.getNumDVs(), decimalDataFromDB.getNumDVs()); - } - - @Test - public void booleanPartitionStatistics() throws Exception { - createMockTableAndPartition(BOOLEAN_TYPE, BOOLEAN_VAL); - // Add partition stats for: BOOLEAN_COL and partition: {PART_KEY, BOOLEAN_VAL} to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
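The table-level statistics tests deleted above (boolean, long, double, string, binary, decimal) all exercise the same write/read-back pattern against HBaseStore. As a minimal sketch of that pattern — assuming a started store and a pre-created mock table, with illustrative column name, type, and values — it reduces to:

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
import org.apache.hadoop.hive.metastore.hbase.HBaseStore;

public class TableStatsRoundTripSketch {
  // Writes long-column statistics for db.tbl and reads them back.
  // Assumes the table already exists in the store, as in the tests in this file.
  static ColumnStatistics roundTrip(HBaseStore store, String db, String tbl, String col)
      throws Exception {
    ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
    desc.setDbName(db);
    desc.setTableName(tbl);
    desc.setIsTblLevel(true);                  // table-level, not partition-level

    LongColumnStatsData longData = new LongColumnStatsData();
    longData.setLowValue(-50L);                // illustrative values
    longData.setHighValue(1200L);
    longData.setNumNulls(23L);
    longData.setNumDVs(213L);
    ColumnStatisticsData data = new ColumnStatisticsData();
    data.setLongStats(longData);               // the union field selects the stats type

    ColumnStatisticsObj obj = new ColumnStatisticsObj();
    obj.setColName(col);
    obj.setColType("long");
    obj.setStatsData(data);

    ColumnStatistics stats = new ColumnStatistics();
    stats.setStatsDesc(desc);
    stats.addToStatsObj(obj);

    store.updateTableColumnStatistics(stats);                           // write
    return store.getTableColumnStatistics(db, tbl, Arrays.asList(col)); // read back
  }
}

Each per-type test then compares the returned descriptor (isIsTblLevel, db and table name) and the type-specific fields of the single ColumnStatisticsObj against what was written.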
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for partition level stats - ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, BOOLEAN_VAL); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = booleanColStatsObjs.get(0); - BooleanColumnStatsData boolData = obj.getStatsData().getBooleanStats(); - // Add to DB - stats.addToStatsObj(obj); - List parVals = new ArrayList(); - parVals.add(BOOLEAN_VAL); - store.updatePartitionColumnStatistics(stats, parVals); - // Get from DB - List partNames = new ArrayList(); - partNames.add(desc.getPartName()); - List colNames = new ArrayList(); - colNames.add(obj.getColName()); - List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); - // Compare ColumnStatisticsDesc - Assert.assertEquals(1, statsFromDB.size()); - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); - Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.BOOLEAN_STATS, dataFromDB.getSetField()); - // Compare BooleanColumnStatsData - BooleanColumnStatsData boolDataFromDB = dataFromDB.getBooleanStats(); - Assert.assertEquals(boolData.getNumTrues(), boolDataFromDB.getNumTrues()); - Assert.assertEquals(boolData.getNumFalses(), boolDataFromDB.getNumFalses()); - Assert.assertEquals(boolData.getNumNulls(), boolDataFromDB.getNumNulls()); - } - - @Test - public void longPartitionStatistics() throws Exception { - createMockTableAndPartition(INT_TYPE, INT_VAL); - // Add partition stats for: LONG_COL and partition: {PART_KEY, INT_VAL} to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for partition level stats - ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, INT_VAL); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = longColStatsObjs.get(0); - LongColumnStatsData longData = obj.getStatsData().getLongStats(); - // Add to DB - stats.addToStatsObj(obj); - List parVals = new ArrayList(); - parVals.add(INT_VAL); - store.updatePartitionColumnStatistics(stats, parVals); - // Get from DB - List partNames = new ArrayList(); - partNames.add(desc.getPartName()); - List colNames = new ArrayList(); - colNames.add(obj.getColName()); - List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); - // Compare ColumnStatisticsDesc - Assert.assertEquals(1, statsFromDB.size()); - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); - Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.LONG_STATS, dataFromDB.getSetField()); - // Compare LongColumnStatsData - LongColumnStatsData longDataFromDB = dataFromDB.getLongStats(); - Assert.assertEquals(longData.getHighValue(), longDataFromDB.getHighValue()); - Assert.assertEquals(longData.getLowValue(), longDataFromDB.getLowValue()); - Assert.assertEquals(longData.getNumNulls(), longDataFromDB.getNumNulls()); - Assert.assertEquals(longData.getNumDVs(), longDataFromDB.getNumDVs()); - } - - @Test - public void doublePartitionStatistics() throws Exception { - createMockTableAndPartition(DOUBLE_TYPE, DOUBLE_VAL); - // Add partition stats for: DOUBLE_COL and partition: {PART_KEY, DOUBLE_VAL} to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for partition level stats - ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, DOUBLE_VAL); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = doubleColStatsObjs.get(0); - DoubleColumnStatsData doubleData = obj.getStatsData().getDoubleStats(); - // Add to DB - stats.addToStatsObj(obj); - List parVals = new ArrayList(); - parVals.add(DOUBLE_VAL); - store.updatePartitionColumnStatistics(stats, parVals); - // Get from DB - List partNames = new ArrayList(); - partNames.add(desc.getPartName()); - List colNames = new ArrayList(); - colNames.add(obj.getColName()); - List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); - // Compare ColumnStatisticsDesc - Assert.assertEquals(1, statsFromDB.size()); - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); - Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.DOUBLE_STATS, dataFromDB.getSetField()); - // Compare DoubleColumnStatsData - DoubleColumnStatsData doubleDataFromDB = dataFromDB.getDoubleStats(); - Assert.assertEquals(doubleData.getHighValue(), doubleDataFromDB.getHighValue(), 0.01); - Assert.assertEquals(doubleData.getLowValue(), doubleDataFromDB.getLowValue(), 0.01); - Assert.assertEquals(doubleData.getNumNulls(), doubleDataFromDB.getNumNulls()); - Assert.assertEquals(doubleData.getNumDVs(), doubleDataFromDB.getNumDVs()); - } - - @Test - public void stringPartitionStatistics() throws Exception { - createMockTableAndPartition(STRING_TYPE, STRING_VAL); - // Add partition stats for: STRING_COL and partition: {PART_KEY, STRING_VAL} to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for partition level stats - ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, STRING_VAL); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = stringColStatsObjs.get(0); - StringColumnStatsData stringData = obj.getStatsData().getStringStats(); - // Add to DB - stats.addToStatsObj(obj); - List parVals = new ArrayList(); - parVals.add(STRING_VAL); - store.updatePartitionColumnStatistics(stats, parVals); - // Get from DB - List partNames = new ArrayList(); - partNames.add(desc.getPartName()); - List colNames = new ArrayList(); - colNames.add(obj.getColName()); - List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); - // Compare ColumnStatisticsDesc - Assert.assertEquals(1, statsFromDB.size()); - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); - Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.STRING_STATS, dataFromDB.getSetField()); - // Compare StringColumnStatsData - StringColumnStatsData stringDataFromDB = dataFromDB.getStringStats(); - Assert.assertEquals(stringData.getMaxColLen(), stringDataFromDB.getMaxColLen()); - Assert.assertEquals(stringData.getAvgColLen(), stringDataFromDB.getAvgColLen(), 0.01); - Assert.assertEquals(stringData.getNumNulls(), stringDataFromDB.getNumNulls()); - Assert.assertEquals(stringData.getNumDVs(), stringDataFromDB.getNumDVs()); - } - - @Test - public void binaryPartitionStatistics() throws Exception { - createMockTableAndPartition(BINARY_TYPE, BINARY_VAL); - // Add partition stats for: BINARY_COL and partition: {PART_KEY, BINARY_VAL} to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for partition level stats - ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, BINARY_VAL); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = binaryColStatsObjs.get(0); - BinaryColumnStatsData binaryData = obj.getStatsData().getBinaryStats(); - // Add to DB - stats.addToStatsObj(obj); - List parVals = new ArrayList(); - parVals.add(BINARY_VAL); - store.updatePartitionColumnStatistics(stats, parVals); - // Get from DB - List partNames = new ArrayList(); - partNames.add(desc.getPartName()); - List colNames = new ArrayList(); - colNames.add(obj.getColName()); - List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); - // Compare ColumnStatisticsDesc - Assert.assertEquals(1, statsFromDB.size()); - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); - Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.BINARY_STATS, dataFromDB.getSetField()); - // Compare BinaryColumnStatsData - BinaryColumnStatsData binaryDataFromDB = dataFromDB.getBinaryStats(); - Assert.assertEquals(binaryData.getMaxColLen(), binaryDataFromDB.getMaxColLen()); - Assert.assertEquals(binaryData.getAvgColLen(), binaryDataFromDB.getAvgColLen(), 0.01); - Assert.assertEquals(binaryData.getNumNulls(), binaryDataFromDB.getNumNulls()); - } - - @Test - public void decimalPartitionStatistics() throws Exception { - createMockTableAndPartition(DECIMAL_TYPE, DECIMAL_VAL); - // Add partition stats for: DECIMAL_COL and partition: {PART_KEY, DECIMAL_VAL} to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
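The partition-level tests differ from the table-level ones in only two ways, condensed in the sketch below (same assumptions as the table-level sketch; partition key, value, and stats values are illustrative): the descriptor carries a partition name with isTblLevel set to false, and the update call also takes the partition's value list.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.hbase.HBaseStore;

public class PartitionStatsRoundTripSketch {
  // Writes boolean-column statistics for one partition and reads them back.
  // Assumes the table and the partition already exist, as in the tests in this file.
  static List<ColumnStatistics> roundTrip(HBaseStore store, String db, String tbl,
      String partKey, String partVal, String col) throws Exception {
    ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
    desc.setDbName(db);
    desc.setTableName(tbl);
    desc.setPartName(partKey + "=" + partVal); // e.g. "part1=val1"
    desc.setIsTblLevel(false);                 // partition-level stats

    BooleanColumnStatsData boolData = new BooleanColumnStatsData();
    boolData.setNumTrues(10L);                 // illustrative values
    boolData.setNumFalses(5L);
    boolData.setNumNulls(1L);
    ColumnStatisticsData data = new ColumnStatisticsData();
    data.setBooleanStats(boolData);

    ColumnStatisticsObj obj = new ColumnStatisticsObj();
    obj.setColName(col);
    obj.setColType("boolean");
    obj.setStatsData(data);

    ColumnStatistics stats = new ColumnStatistics();
    stats.setStatsDesc(desc);
    stats.addToStatsObj(obj);

    // The write takes the partition's value list; the read takes partition names.
    store.updatePartitionColumnStatistics(stats, Arrays.asList(partVal));
    return store.getPartitionColumnStatistics(db, tbl,
        Arrays.asList(desc.getPartName()), Arrays.asList(col));
  }
}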
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for partition level stats - ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, DECIMAL_VAL); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = decimalColStatsObjs.get(0); - DecimalColumnStatsData decimalData = obj.getStatsData().getDecimalStats(); - // Add to DB - stats.addToStatsObj(obj); - List parVals = new ArrayList(); - parVals.add(DECIMAL_VAL); - store.updatePartitionColumnStatistics(stats, parVals); - // Get from DB - List partNames = new ArrayList(); - partNames.add(desc.getPartName()); - List colNames = new ArrayList(); - colNames.add(obj.getColName()); - List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); - // Compare ColumnStatisticsDesc - Assert.assertEquals(1, statsFromDB.size()); - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); - Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.DECIMAL_STATS, dataFromDB.getSetField()); - // Compare DecimalColumnStatsData - DecimalColumnStatsData decimalDataFromDB = dataFromDB.getDecimalStats(); - Assert.assertEquals(decimalData.getHighValue(), decimalDataFromDB.getHighValue()); - Assert.assertEquals(decimalData.getLowValue(), decimalDataFromDB.getLowValue()); - Assert.assertEquals(decimalData.getNumNulls(), decimalDataFromDB.getNumNulls()); - Assert.assertEquals(decimalData.getNumDVs(), decimalDataFromDB.getNumDVs()); - } - - @Test - public void createTableWithPrimaryKey() throws Exception { - String tableName = "pktable"; - String pkName = "test_pk"; - String pkColNames[] = { "col0" }; - Table table = createMultiColumnTable(tableName, "int"); - - List pk = Arrays.asList( - new SQLPrimaryKey(DB, tableName, pkColNames[0], 0, pkName, true, false, true)); - - store.createTableWithConstraints(table, pk, null, null, null); - - pk = store.getPrimaryKeys(DB, tableName); - - Assert.assertNotNull(pk); - Assert.assertEquals(1, pk.size()); - Assert.assertEquals(DB, pk.get(0).getTable_db()); - Assert.assertEquals(tableName, pk.get(0).getTable_name()); - Assert.assertEquals(pkColNames[0], pk.get(0).getColumn_name()); - Assert.assertEquals(0, pk.get(0).getKey_seq()); - Assert.assertEquals(pkName, pk.get(0).getPk_name()); - Assert.assertTrue(pk.get(0).isEnable_cstr()); - Assert.assertFalse(pk.get(0).isValidate_cstr()); - Assert.assertTrue(pk.get(0).isRely_cstr()); - - // Drop the primary key - store.dropConstraint(DB, tableName, pkName); - - pk = store.getPrimaryKeys(DB, tableName); - Assert.assertNull(pk); - } - - @Test - public void createTableWithForeignKey() throws Exception { - String tableName = "fktable"; - String pkTable = "pktable"; - String pkName = "test_pk"; - String fkName = "test_fk"; - String fkColNames[] = { "col0" }; - String pkColNames[] = { "pcol0" }; - Table table = createMultiColumnTable(tableName, "int"); - - List fk = Arrays.asList( - new SQLForeignKey(DB, pkTable, pkColNames[0], DB, tableName, fkColNames[0], 0, 
1, 2, - fkName, pkName, true, false, false)); - - store.createTableWithConstraints(table, null, fk, null, null); - - fk = store.getForeignKeys(DB, pkTable, DB, tableName); - - Assert.assertNotNull(fk); - Assert.assertEquals(1, fk.size()); - Assert.assertEquals(DB, fk.get(0).getPktable_db()); - Assert.assertEquals(pkTable, fk.get(0).getPktable_name()); - Assert.assertEquals(pkColNames[0], fk.get(0).getPkcolumn_name()); - Assert.assertEquals(DB, fk.get(0).getFktable_db()); - Assert.assertEquals(tableName, fk.get(0).getFktable_name()); - Assert.assertEquals(fkColNames[0], fk.get(0).getFkcolumn_name()); - Assert.assertEquals(0, fk.get(0).getKey_seq()); - Assert.assertEquals(1, fk.get(0).getUpdate_rule()); - Assert.assertEquals(2, fk.get(0).getDelete_rule()); - Assert.assertEquals(fkName, fk.get(0).getFk_name()); - Assert.assertEquals(pkName, fk.get(0).getPk_name()); - Assert.assertTrue(fk.get(0).isEnable_cstr()); - Assert.assertFalse(fk.get(0).isValidate_cstr()); - Assert.assertFalse(fk.get(0).isRely_cstr()); - } - - // Test that we can add a primary key with multiple columns - @Test - public void addMultiColPrimaryKey() throws Exception { - String tableName = "mcpktable"; - String pkName = "test_pk"; - String pkColNames[] = { "col0", "col1", "col2" }; - Table table = createMultiColumnTable(tableName, "int", "varchar(32)", "decimal(10,2)"); - - List pk = Arrays.asList( - new SQLPrimaryKey(DB, tableName, pkColNames[1], 0, pkName, false, true, true), - new SQLPrimaryKey(DB, tableName, pkColNames[2], 1, pkName, false, true, true) - ); - - store.createTable(table); - store.addPrimaryKeys(pk); - - Assert.assertNotNull(pk); - Assert.assertEquals(2, pk.size()); - SQLPrimaryKey[] sorted = pk.toArray(new SQLPrimaryKey[2]); - Arrays.sort(sorted, new Comparator() { - @Override - public int compare(SQLPrimaryKey o1, SQLPrimaryKey o2) { - return o1.getColumn_name().compareTo(o2.getColumn_name()); - } - }); - for (int i = 0; i < 2; i++) { - Assert.assertEquals(DB, sorted[i].getTable_db()); - Assert.assertEquals(tableName, sorted[i].getTable_name()); - Assert.assertEquals(pkColNames[i+1], sorted[i].getColumn_name()); - Assert.assertEquals(i, sorted[i].getKey_seq()); - Assert.assertEquals(pkName, sorted[i].getPk_name()); - Assert.assertFalse(sorted[i].isEnable_cstr()); - Assert.assertTrue(sorted[i].isValidate_cstr()); - Assert.assertTrue(sorted[i].isRely_cstr()); - } - - } - - // Test that we can create a foreign key with multiple columns - @Test - public void addMultiColForeignKey() throws Exception { - String tableName = "mcfktable"; - String pkTable = "pktable"; - String pkName = "test_pk"; - String fkName = "test_fk"; - String fkColNames[] = { "col0", "col1", "col2" }; - String pkColNames[] = { "pcol0", "pcol1" }; - Table table = createMultiColumnTable(tableName, "int", "double", "timestamp"); - - List fk = Arrays.asList( - new SQLForeignKey(DB, pkTable, pkColNames[0], DB, tableName, fkColNames[1], 0, 1, 2, - fkName, pkName, true, false, false), - new SQLForeignKey(DB, pkTable, pkColNames[1], DB, tableName, fkColNames[2], 1, 1, 2, - fkName, pkName, true, false, false) - ); - - store.createTable(table); - store.addForeignKeys(fk); - - fk = store.getForeignKeys(DB, pkTable, DB, tableName); - - Assert.assertNotNull(fk); - Assert.assertEquals(2, fk.size()); - SQLForeignKey[] sorted = fk.toArray(new SQLForeignKey[2]); - Arrays.sort(sorted, new Comparator() { - @Override - public int compare(SQLForeignKey o1, SQLForeignKey o2) { - if (o1.getFk_name().equals(o2.getFk_name())) { - return 
o1.getFkcolumn_name().compareTo(o2.getFkcolumn_name()); - } else { - return o1.getFk_name().compareTo(o2.getFk_name()); - } - } - }); - - for (int i = 0; i < 2; i++) { - Assert.assertEquals(DB, sorted[i].getPktable_db()); - Assert.assertEquals(pkTable, sorted[i].getPktable_name()); - Assert.assertEquals(pkColNames[i], sorted[i].getPkcolumn_name()); - Assert.assertEquals(DB, sorted[i].getFktable_db()); - Assert.assertEquals(tableName, sorted[i].getFktable_name()); - Assert.assertEquals(fkColNames[i+1], sorted[i].getFkcolumn_name()); - Assert.assertEquals(i, sorted[i].getKey_seq()); - Assert.assertEquals(1, sorted[i].getUpdate_rule()); - Assert.assertEquals(2, sorted[i].getDelete_rule()); - Assert.assertEquals(fkName, sorted[i].getFk_name()); - Assert.assertEquals(pkName, sorted[i].getPk_name()); - Assert.assertTrue(sorted[i].isEnable_cstr()); - Assert.assertFalse(sorted[i].isValidate_cstr()); - Assert.assertFalse(sorted[i].isRely_cstr()); - } - - } - - // Test that we can add 2 foreign keys at once - @Test - public void addMultiForeignKeys() throws Exception { - String tableName = "mcfktable"; - String pkTable = "pktable"; - String pkTable2 = "pktable2"; - String pkName = "test_pk"; - String pkName2 = "test_pk2"; - String fkName = "test_fk"; - String fkName2 = "test_fk2"; - String fkColNames[] = { "col0", "col1", "col2" }; - String pkColNames[] = { "pcol0", "pcol1" }; - String pkColNames2[] = { "p2col0" }; - Table table = createMultiColumnTable(tableName, "int", "double", "timestamp"); - - List fk = Arrays.asList( - new SQLForeignKey(DB, pkTable, pkColNames[0], DB, tableName, fkColNames[1], 0, 1, 2, - fkName, pkName, true, false, true), - new SQLForeignKey(DB, pkTable, pkColNames[1], DB, tableName, fkColNames[2], 1, 1, 2, - fkName, pkName, true, false, true), - new SQLForeignKey(DB, pkTable2, pkColNames2[0], DB, tableName, fkColNames[0], 0, 1, 2, - fkName2, pkName2, true, false, true) - ); - - store.createTable(table); - store.addForeignKeys(fk); - - fk = store.getForeignKeys(DB, pkTable, DB, tableName); - - Assert.assertNotNull(fk); - Assert.assertEquals(2, fk.size()); - SQLForeignKey[] sorted = fk.toArray(new SQLForeignKey[2]); - Arrays.sort(sorted, new Comparator() { - @Override - public int compare(SQLForeignKey o1, SQLForeignKey o2) { - if (o1.getFk_name().equals(o2.getFk_name())) { - return o1.getFkcolumn_name().compareTo(o2.getFkcolumn_name()); - } else { - return o1.getFk_name().compareTo(o2.getFk_name()); - } - } - }); - - for (int i = 0; i < 2; i++) { - Assert.assertEquals(DB, sorted[i].getPktable_db()); - Assert.assertEquals(pkTable, sorted[i].getPktable_name()); - Assert.assertEquals(pkColNames[i], sorted[i].getPkcolumn_name()); - Assert.assertEquals(DB, sorted[i].getFktable_db()); - Assert.assertEquals(tableName, sorted[i].getFktable_name()); - Assert.assertEquals(fkColNames[i+1], sorted[i].getFkcolumn_name()); - Assert.assertEquals(i, sorted[i].getKey_seq()); - Assert.assertEquals(1, sorted[i].getUpdate_rule()); - Assert.assertEquals(2, sorted[i].getDelete_rule()); - Assert.assertEquals(fkName, sorted[i].getFk_name()); - Assert.assertEquals(pkName, sorted[i].getPk_name()); - Assert.assertTrue(sorted[i].isEnable_cstr()); - Assert.assertFalse(sorted[i].isValidate_cstr()); - Assert.assertTrue(sorted[i].isRely_cstr()); - } - fk = store.getForeignKeys(DB, pkTable2, DB, tableName); - Assert.assertNotNull(fk); - Assert.assertEquals(1, fk.size()); - Assert.assertEquals(DB, fk.get(0).getPktable_db()); - Assert.assertEquals(pkTable2, fk.get(0).getPktable_name()); - 
Assert.assertEquals(pkColNames2[0], fk.get(0).getPkcolumn_name()); - Assert.assertEquals(DB, fk.get(0).getFktable_db()); - Assert.assertEquals(tableName, fk.get(0).getFktable_name()); - Assert.assertEquals(fkColNames[0], fk.get(0).getFkcolumn_name()); - Assert.assertEquals(0, fk.get(0).getKey_seq()); - Assert.assertEquals(1, fk.get(0).getUpdate_rule()); - Assert.assertEquals(2, fk.get(0).getDelete_rule()); - Assert.assertEquals(fkName2, fk.get(0).getFk_name()); - Assert.assertEquals(pkName2, fk.get(0).getPk_name()); - Assert.assertTrue(fk.get(0).isEnable_cstr()); - Assert.assertFalse(fk.get(0).isValidate_cstr()); - Assert.assertTrue(fk.get(0).isRely_cstr()); - - } - - // Test that we can add a foreign key when one already exists - @Test - public void addSecondForeignKeys() throws Exception { - String tableName = "mcfktable"; - String pkTable = "pktable"; - String pkTable2 = "pktable2"; - String pkName = "test_pk"; - String pkName2 = "test_pk2"; - String fkName = "test_fk"; - String fkName2 = "test_fk2"; - String fkColNames[] = { "col0", "col1", "col2" }; - String pkColNames[] = { "pcol0", "pcol1" }; - String pkColNames2[] = { "p2col0" }; - Table table = createMultiColumnTable(tableName, "int", "double", "timestamp"); - - List fk = Arrays.asList( - new SQLForeignKey(DB, pkTable, pkColNames[0], DB, tableName, fkColNames[1], 0, 1, 2, - fkName, pkName, true, false, true), - new SQLForeignKey(DB, pkTable, pkColNames[1], DB, tableName, fkColNames[2], 1, 1, 2, - fkName, pkName, true, false, true) - ); - - store.createTable(table); - store.addForeignKeys(fk); - - fk = Arrays.asList( - new SQLForeignKey(DB, pkTable2, pkColNames2[0], DB, tableName, fkColNames[0], 0, 1, 2, - fkName2, pkName2, true, false, true) - ); - store.addForeignKeys(fk); - - fk = store.getForeignKeys(DB, pkTable, DB, tableName); - - Assert.assertNotNull(fk); - Assert.assertEquals(2, fk.size()); - SQLForeignKey[] sorted = fk.toArray(new SQLForeignKey[2]); - Arrays.sort(sorted, new Comparator() { - @Override - public int compare(SQLForeignKey o1, SQLForeignKey o2) { - if (o1.getFk_name().equals(o2.getFk_name())) { - return o1.getFkcolumn_name().compareTo(o2.getFkcolumn_name()); - } else { - return o1.getFk_name().compareTo(o2.getFk_name()); - } - } - }); - - for (int i = 0; i < 2; i++) { - Assert.assertEquals(DB, sorted[i].getPktable_db()); - Assert.assertEquals(pkTable, sorted[i].getPktable_name()); - Assert.assertEquals(pkColNames[i], sorted[i].getPkcolumn_name()); - Assert.assertEquals(DB, sorted[i].getFktable_db()); - Assert.assertEquals(tableName, sorted[i].getFktable_name()); - Assert.assertEquals(fkColNames[i+1], sorted[i].getFkcolumn_name()); - Assert.assertEquals(i, sorted[i].getKey_seq()); - Assert.assertEquals(1, sorted[i].getUpdate_rule()); - Assert.assertEquals(2, sorted[i].getDelete_rule()); - Assert.assertEquals(fkName, sorted[i].getFk_name()); - Assert.assertEquals(pkName, sorted[i].getPk_name()); - Assert.assertTrue(sorted[i].isEnable_cstr()); - Assert.assertFalse(sorted[i].isValidate_cstr()); - Assert.assertTrue(sorted[i].isRely_cstr()); - } - - fk = store.getForeignKeys(DB, pkTable2, DB, tableName); - Assert.assertNotNull(fk); - Assert.assertEquals(1, fk.size()); - Assert.assertEquals(DB, fk.get(0).getPktable_db()); - Assert.assertEquals(pkTable2, fk.get(0).getPktable_name()); - Assert.assertEquals(pkColNames2[0], fk.get(0).getPkcolumn_name()); - Assert.assertEquals(DB, fk.get(0).getFktable_db()); - Assert.assertEquals(tableName, fk.get(0).getFktable_name()); - Assert.assertEquals(fkColNames[0], 
fk.get(0).getFkcolumn_name()); - Assert.assertEquals(0, fk.get(0).getKey_seq()); - Assert.assertEquals(1, fk.get(0).getUpdate_rule()); - Assert.assertEquals(2, fk.get(0).getDelete_rule()); - Assert.assertEquals(fkName2, fk.get(0).getFk_name()); - Assert.assertEquals(pkName2, fk.get(0).getPk_name()); - Assert.assertTrue(fk.get(0).isEnable_cstr()); - Assert.assertFalse(fk.get(0).isValidate_cstr()); - Assert.assertTrue(fk.get(0).isRely_cstr()); - - // Check that passing null gets all the foreign keys - fk = store.getForeignKeys(null, null, DB, tableName); - Assert.assertNotNull(fk); - Assert.assertEquals(3, fk.size()); - - store.dropConstraint(DB, tableName, fkName); - - fk = store.getForeignKeys(DB, pkTable2, DB, tableName); - Assert.assertNotNull(fk); - Assert.assertEquals(1, fk.size()); - Assert.assertEquals(DB, fk.get(0).getPktable_db()); - Assert.assertEquals(pkTable2, fk.get(0).getPktable_name()); - Assert.assertEquals(pkColNames2[0], fk.get(0).getPkcolumn_name()); - Assert.assertEquals(DB, fk.get(0).getFktable_db()); - Assert.assertEquals(tableName, fk.get(0).getFktable_name()); - Assert.assertEquals(fkColNames[0], fk.get(0).getFkcolumn_name()); - Assert.assertEquals(0, fk.get(0).getKey_seq()); - Assert.assertEquals(1, fk.get(0).getUpdate_rule()); - Assert.assertEquals(2, fk.get(0).getDelete_rule()); - Assert.assertEquals(fkName2, fk.get(0).getFk_name()); - Assert.assertEquals(pkName2, fk.get(0).getPk_name()); - Assert.assertTrue(fk.get(0).isEnable_cstr()); - Assert.assertFalse(fk.get(0).isValidate_cstr()); - Assert.assertTrue(fk.get(0).isRely_cstr()); - - store.dropConstraint(DB, tableName, fkName2); - - fk = store.getForeignKeys(DB, pkTable2, DB, tableName); - Assert.assertNull(fk); - } - - // Try adding a primary key when one already exists - @Test(expected= MetaException.class) - public void doublePrimaryKey() throws Exception { - String tableName = "pktable"; - String pkName = "test_pk"; - String pkColNames[] = { "col0" }; - Table table = createMultiColumnTable(tableName, "int"); - - List pk = Arrays.asList( - new SQLPrimaryKey(DB, tableName, pkColNames[0], 0, pkName, true, false, true)); - - store.createTableWithConstraints(table, pk, null, null, null); - - store.addPrimaryKeys(pk); - } - - @Test - public void createTableWithUniqueConstraint() throws Exception { - String tableName = "uktable"; - String ukName = "test_uk"; - String ukColNames[] = { "col0" }; - Table table = createMultiColumnTable(tableName, "int"); - - List uk = Arrays.asList( - new SQLUniqueConstraint(DB, tableName, ukColNames[0], 0, ukName, true, false, true)); - - store.createTableWithConstraints(table, null, null, uk, null); - - uk = store.getUniqueConstraints(DB, tableName); - - Assert.assertNotNull(uk); - Assert.assertEquals(1, uk.size()); - Assert.assertEquals(DB, uk.get(0).getTable_db()); - Assert.assertEquals(tableName, uk.get(0).getTable_name()); - Assert.assertEquals(ukColNames[0], uk.get(0).getColumn_name()); - Assert.assertEquals(0, uk.get(0).getKey_seq()); - Assert.assertEquals(ukName, uk.get(0).getUk_name()); - Assert.assertTrue(uk.get(0).isEnable_cstr()); - Assert.assertFalse(uk.get(0).isValidate_cstr()); - Assert.assertTrue(uk.get(0).isRely_cstr()); - - // Drop the unique constraint - store.dropConstraint(DB, tableName, ukName); - - uk = store.getUniqueConstraints(DB, tableName); - Assert.assertNull(uk); - } - - @Test - public void addMultiUniqueConstraints() throws Exception { - String tableName = "mcuktable"; - String ukName = "test_uk"; - String ukName2 = "test_uk2"; - String ukColNames[] = { 
"col0", "col1" }; - Table table = createMultiColumnTable(tableName, "int", "double", "timestamp"); - - List uks = Arrays.asList( - new SQLUniqueConstraint(DB, tableName, ukColNames[0], 0, ukName, true, false, true), - new SQLUniqueConstraint(DB, tableName, ukColNames[1], 0, ukName2, true, false, true) - ); - - store.createTable(table); - store.addUniqueConstraints(uks); - - uks = store.getUniqueConstraints(DB, tableName); - - Assert.assertNotNull(uks); - Assert.assertEquals(2, uks.size()); - SQLUniqueConstraint[] sorted = uks.toArray(new SQLUniqueConstraint[2]); - Arrays.sort(sorted, new Comparator() { - @Override - public int compare(SQLUniqueConstraint o1, SQLUniqueConstraint o2) { - if (o1.getUk_name().equals(o2.getUk_name())) { - return o1.getColumn_name().compareTo(o2.getColumn_name()); - } else { - return o1.getUk_name().compareTo(o2.getUk_name()); - } - } - }); - - Assert.assertEquals(DB, sorted[0].getTable_db()); - Assert.assertEquals(tableName, sorted[0].getTable_name()); - Assert.assertEquals(ukColNames[0], sorted[0].getColumn_name()); - Assert.assertEquals(0, sorted[0].getKey_seq()); - Assert.assertEquals(ukName, sorted[0].getUk_name()); - Assert.assertTrue(sorted[0].isEnable_cstr()); - Assert.assertFalse(sorted[0].isValidate_cstr()); - Assert.assertTrue(sorted[0].isRely_cstr()); - - Assert.assertEquals(DB, sorted[1].getTable_db()); - Assert.assertEquals(tableName, sorted[1].getTable_name()); - Assert.assertEquals(ukColNames[1], sorted[1].getColumn_name()); - Assert.assertEquals(0, sorted[1].getKey_seq()); - Assert.assertEquals(ukName2, sorted[1].getUk_name()); - Assert.assertTrue(sorted[1].isEnable_cstr()); - Assert.assertFalse(sorted[1].isValidate_cstr()); - Assert.assertTrue(sorted[1].isRely_cstr()); - } - - @Test - public void addMultiNotNullConstraints() throws Exception { - String tableName = "mcnntable"; - String nnName = "test_nn"; - String nnName2 = "test_nn2"; - String nnColNames[] = { "col0", "col1" }; - Table table = createMultiColumnTable(tableName, "int", "double", "timestamp"); - - List nns = Arrays.asList( - new SQLNotNullConstraint(DB, tableName, nnColNames[0], nnName, true, false, true), - new SQLNotNullConstraint(DB, tableName, nnColNames[1], nnName2, true, false, true) - ); - - store.createTable(table); - store.addNotNullConstraints(nns); - - nns = store.getNotNullConstraints(DB, tableName); - - Assert.assertNotNull(nns); - Assert.assertEquals(2, nns.size()); - SQLNotNullConstraint[] sorted = nns.toArray(new SQLNotNullConstraint[2]); - Arrays.sort(sorted, new Comparator() { - @Override - public int compare(SQLNotNullConstraint o1, SQLNotNullConstraint o2) { - if (o1.getNn_name().equals(o2.getNn_name())) { - return o1.getColumn_name().compareTo(o2.getColumn_name()); - } else { - return o1.getNn_name().compareTo(o2.getNn_name()); - } - } - }); - - Assert.assertEquals(DB, sorted[0].getTable_db()); - Assert.assertEquals(tableName, sorted[0].getTable_name()); - Assert.assertEquals(nnColNames[0], sorted[0].getColumn_name()); - Assert.assertEquals(nnName, sorted[0].getNn_name()); - Assert.assertTrue(sorted[0].isEnable_cstr()); - Assert.assertFalse(sorted[0].isValidate_cstr()); - Assert.assertTrue(sorted[0].isRely_cstr()); - - Assert.assertEquals(DB, sorted[1].getTable_db()); - Assert.assertEquals(tableName, sorted[1].getTable_name()); - Assert.assertEquals(nnColNames[1], sorted[1].getColumn_name()); - Assert.assertEquals(nnName2, sorted[1].getNn_name()); - Assert.assertTrue(sorted[1].isEnable_cstr()); - Assert.assertFalse(sorted[1].isValidate_cstr()); - 
Assert.assertTrue(sorted[1].isRely_cstr()); - } - - private Table createMockTableAndPartition(String partType, String partVal) throws Exception { - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", partType, "")); - List vals = new ArrayList(); - vals.add(partVal); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - Map params = new HashMap(); - params.put("key", "value"); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, - serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); - int currentTime = (int)(System.currentTimeMillis() / 1000); - Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); - store.createTable(table); - Partition part = new Partition(vals, DB, TBL, currentTime, currentTime, sd, - emptyParameters); - store.addPartition(part); - return table; - } - - private Table createMockTable(String type) throws Exception { - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", type, "")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - Map params = new HashMap(); - params.put("key", "value"); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, - serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); - int currentTime = (int)(System.currentTimeMillis() / 1000); - Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); - store.createTable(table); - return table; - } - - private Table createMultiColumnTable(String tblName, String... types) throws Exception { - List cols = new ArrayList(); - for (int i = 0; i < types.length; i++) cols.add(new FieldSchema("col" + i, types[i], "")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - Map params = new HashMap(); - params.put("key", "value"); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, - serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); - int currentTime = (int)(System.currentTimeMillis() / 1000); - Table table = new Table(tblName, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); - store.createTable(table); - return table; - } - - /** - * Returns a dummy table level ColumnStatisticsDesc with default values - */ - private ColumnStatisticsDesc getMockTblColStatsDesc() { - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); - desc.setLastAnalyzed(DEFAULT_TIME); - desc.setDbName(DB); - desc.setTableName(TBL); - desc.setIsTblLevel(true); - return desc; - } - - /** - * Returns a dummy partition level ColumnStatisticsDesc - */ - private ColumnStatisticsDesc getMockPartColStatsDesc(String partKey, String partVal) { - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); - desc.setLastAnalyzed(DEFAULT_TIME); - desc.setDbName(DB); - desc.setTableName(TBL); - // part1=val1 - desc.setPartName(partKey + PART_KV_SEPARATOR + partVal); - desc.setIsTblLevel(false); - return desc; - } - -} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java deleted file mode 100644 index b1dc542..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java +++ /dev/null @@ -1,622 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.SortedMap; -import java.util.TreeMap; - -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.Decimal; -import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; -import org.apache.hadoop.hive.metastore.api.Table; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * - */ -public class TestHBaseStoreBitVector { - private static final Logger LOG = LoggerFactory.getLogger(TestHBaseStoreBitVector.class.getName()); - static Map emptyParameters = new HashMap(); - // Table with NUM_PART_KEYS partitioning keys and NUM_PARTITIONS values per key - static final int NUM_PART_KEYS = 1; - static final int NUM_PARTITIONS = 5; - static final String DB = "db"; - static final String TBL = "tbl"; - static final String COL = "col"; - static final String PART_KEY_PREFIX = "part"; - static final String PART_VAL_PREFIX = "val"; - static final String PART_KV_SEPARATOR = "="; - static final List PART_KEYS = new ArrayList(); - static final List PART_VALS = new ArrayList(); - // Initialize mock partitions - static { - for (int i = 1; i <= NUM_PART_KEYS; i++) { - PART_KEYS.add(PART_KEY_PREFIX + i); - } - for (int i = 1; i <= NUM_PARTITIONS; i++) { - PART_VALS.add(PART_VAL_PREFIX + i); - } - } - static final long DEFAULT_TIME = System.currentTimeMillis(); - static final String PART_KEY = "part"; - static final String LONG_COL = "longCol"; - static final String LONG_TYPE = "long"; - static 
final String INT_TYPE = "int"; - static final String INT_VAL = "1234"; - static final String DOUBLE_COL = "doubleCol"; - static final String DOUBLE_TYPE = "double"; - static final String DOUBLE_VAL = "3.1415"; - static final String STRING_COL = "stringCol"; - static final String STRING_TYPE = "string"; - static final String STRING_VAL = "stringval"; - static final String DECIMAL_COL = "decimalCol"; - static final String DECIMAL_TYPE = "decimal(5,3)"; - static final String DECIMAL_VAL = "12.123"; - static List longColStatsObjs = new ArrayList( - NUM_PARTITIONS); - static List doubleColStatsObjs = new ArrayList( - NUM_PARTITIONS); - static List stringColStatsObjs = new ArrayList( - NUM_PARTITIONS); - static List decimalColStatsObjs = new ArrayList( - NUM_PARTITIONS); - - @Rule public ExpectedException thrown = ExpectedException.none(); - @Mock HTableInterface htable; - SortedMap rows = new TreeMap<>(); - HBaseStore store; - - - @BeforeClass - public static void beforeTest() { - // All data intitializations - populateMockStats(); - } - - private static void populateMockStats() { - ColumnStatisticsObj statsObj; - // Add NUM_PARTITIONS ColumnStatisticsObj of each type - // For aggregate stats test, we'll treat each ColumnStatisticsObj as stats for 1 partition - // For the rest, we'll just pick the 1st ColumnStatisticsObj from this list and use it - for (int i = 0; i < NUM_PARTITIONS; i++) { - statsObj = mockLongStats(i); - longColStatsObjs.add(statsObj); - statsObj = mockDoubleStats(i); - doubleColStatsObjs.add(statsObj); - statsObj = mockStringStats(i); - stringColStatsObjs.add(statsObj); - statsObj = mockDecimalStats(i); - decimalColStatsObjs.add(statsObj); - } - } - - private static ColumnStatisticsObj mockLongStats(int i) { - long high = 120938479124L + 100*i; - long low = -12341243213412124L - 50*i; - long nulls = 23 + i; - long dVs = 213L + 10*i; - String bitVectors = "{0, 1, 2, 3, 4, 5, 6, 7, 8}{1, 2, 3, 4, 5, 6, 7, 8}"; - ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); - colStatsObj.setColName(LONG_COL); - colStatsObj.setColType(LONG_TYPE); - ColumnStatisticsData data = new ColumnStatisticsData(); - LongColumnStatsData longData = new LongColumnStatsData(); - longData.setHighValue(high); - longData.setLowValue(low); - longData.setNumNulls(nulls); - longData.setNumDVs(dVs); - longData.setBitVectors(bitVectors); - data.setLongStats(longData); - colStatsObj.setStatsData(data); - return colStatsObj; - } - - private static ColumnStatisticsObj mockDoubleStats(int i) { - double high = 123423.23423 + 100*i; - double low = 0.00001234233 - 50*i; - long nulls = 92 + i; - long dVs = 1234123421L + 10*i; - String bitVectors = "{0, 1, 2, 3, 4, 5, 6, 7, 8}{0, 2, 3, 4, 5, 6, 7, 8}"; - ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); - colStatsObj.setColName(DOUBLE_COL); - colStatsObj.setColType(DOUBLE_TYPE); - ColumnStatisticsData data = new ColumnStatisticsData(); - DoubleColumnStatsData doubleData = new DoubleColumnStatsData(); - doubleData.setHighValue(high); - doubleData.setLowValue(low); - doubleData.setNumNulls(nulls); - doubleData.setNumDVs(dVs); - doubleData.setBitVectors(bitVectors); - data.setDoubleStats(doubleData); - colStatsObj.setStatsData(data); - return colStatsObj; - } - - private static ColumnStatisticsObj mockStringStats(int i) { - long maxLen = 1234 + 10*i; - double avgLen = 32.3 + i; - long nulls = 987 + 10*i; - long dVs = 906 + i; - String bitVectors = "{0, 1, 2, 3, 4, 5, 6, 7, 8}{0, 1, 3, 4, 5, 6, 7, 8}"; - ColumnStatisticsObj colStatsObj = new 
ColumnStatisticsObj(); - colStatsObj.setColName(STRING_COL); - colStatsObj.setColType(STRING_TYPE); - ColumnStatisticsData data = new ColumnStatisticsData(); - StringColumnStatsData stringData = new StringColumnStatsData(); - stringData.setMaxColLen(maxLen); - stringData.setAvgColLen(avgLen); - stringData.setNumNulls(nulls); - stringData.setNumDVs(dVs); - stringData.setBitVectors(bitVectors); - data.setStringStats(stringData); - colStatsObj.setStatsData(data); - return colStatsObj; - } - - private static ColumnStatisticsObj mockDecimalStats(int i) { - Decimal high = new Decimal(); - high.setScale((short)3); - String strHigh = String.valueOf(3876 + 100*i); - high.setUnscaled(strHigh.getBytes()); - Decimal low = new Decimal(); - low.setScale((short)3); - String strLow = String.valueOf(38 + i); - low.setUnscaled(strLow.getBytes()); - long nulls = 13 + i; - long dVs = 923947293L + 100*i; - String bitVectors = "{0, 1, 2, 3, 4, 5, 6, 7, 8}{0, 1, 2, 4, 5, 6, 7, 8}"; - ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); - colStatsObj.setColName(DECIMAL_COL); - colStatsObj.setColType(DECIMAL_TYPE); - ColumnStatisticsData data = new ColumnStatisticsData(); - DecimalColumnStatsData decimalData = new DecimalColumnStatsData(); - decimalData.setHighValue(high); - decimalData.setLowValue(low); - decimalData.setNumNulls(nulls); - decimalData.setNumDVs(dVs); - decimalData.setBitVectors(bitVectors); - data.setDecimalStats(decimalData); - colStatsObj.setStatsData(data); - return colStatsObj; - } - - @AfterClass - public static void afterTest() { - } - - - @Before - public void init() throws IOException { - MockitoAnnotations.initMocks(this); - HiveConf conf = new HiveConf(); - conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); - store = MockUtils.init(conf, htable, rows); - } - - @Test - public void longTableStatistics() throws Exception { - createMockTable(LONG_COL, LONG_TYPE); - ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for table level stats - ColumnStatisticsDesc desc = getMockTblColStatsDesc(); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = longColStatsObjs.get(0); - LongColumnStatsData longData = obj.getStatsData().getLongStats(); - // Add to DB - stats.addToStatsObj(obj); - store.updateTableColumnStatistics(stats); - // Get from DB - ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(LONG_COL)); - // Compare ColumnStatisticsDesc - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); - Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.LONG_STATS, dataFromDB.getSetField()); - // Compare LongColumnStatsData - LongColumnStatsData longDataFromDB = dataFromDB.getLongStats(); - Assert.assertEquals(longData.getHighValue(), longDataFromDB.getHighValue()); - Assert.assertEquals(longData.getLowValue(), longDataFromDB.getLowValue()); - Assert.assertEquals(longData.getNumNulls(), longDataFromDB.getNumNulls()); - Assert.assertEquals(longData.getNumDVs(), longDataFromDB.getNumDVs()); 
- Assert.assertEquals(longData.getBitVectors(), longDataFromDB.getBitVectors()); - } - - @Test - public void doubleTableStatistics() throws Exception { - createMockTable(DOUBLE_COL, DOUBLE_TYPE); - ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for table level stats - ColumnStatisticsDesc desc = getMockTblColStatsDesc(); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = doubleColStatsObjs.get(0); - DoubleColumnStatsData doubleData = obj.getStatsData().getDoubleStats(); - // Add to DB - stats.addToStatsObj(obj); - store.updateTableColumnStatistics(stats); - // Get from DB - ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(DOUBLE_COL)); - // Compare ColumnStatisticsDesc - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); - Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.DOUBLE_STATS, dataFromDB.getSetField()); - // Compare DoubleColumnStatsData - DoubleColumnStatsData doubleDataFromDB = dataFromDB.getDoubleStats(); - Assert.assertEquals(doubleData.getHighValue(), doubleDataFromDB.getHighValue(), 0.01); - Assert.assertEquals(doubleData.getLowValue(), doubleDataFromDB.getLowValue(), 0.01); - Assert.assertEquals(doubleData.getNumNulls(), doubleDataFromDB.getNumNulls()); - Assert.assertEquals(doubleData.getNumDVs(), doubleDataFromDB.getNumDVs()); - Assert.assertEquals(doubleData.getBitVectors(), doubleDataFromDB.getBitVectors()); - } - - @Test - public void stringTableStatistics() throws Exception { - createMockTable(STRING_COL, STRING_TYPE); - ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for table level stats - ColumnStatisticsDesc desc = getMockTblColStatsDesc(); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = stringColStatsObjs.get(0); - StringColumnStatsData stringData = obj.getStatsData().getStringStats(); - // Add to DB - stats.addToStatsObj(obj); - store.updateTableColumnStatistics(stats); - // Get from DB - ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(STRING_COL)); - // Compare ColumnStatisticsDesc - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); - Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.STRING_STATS, dataFromDB.getSetField()); - // Compare StringColumnStatsData - StringColumnStatsData stringDataFromDB = dataFromDB.getStringStats(); - Assert.assertEquals(stringData.getMaxColLen(), stringDataFromDB.getMaxColLen()); - 
Assert.assertEquals(stringData.getAvgColLen(), stringDataFromDB.getAvgColLen(), 0.01); - Assert.assertEquals(stringData.getNumNulls(), stringDataFromDB.getNumNulls()); - Assert.assertEquals(stringData.getNumDVs(), stringDataFromDB.getNumDVs()); - Assert.assertEquals(stringData.getBitVectors(), stringDataFromDB.getBitVectors()); - } - - @Test - public void decimalTableStatistics() throws Exception { - createMockTable(DECIMAL_COL, DECIMAL_TYPE); - ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for table level stats - ColumnStatisticsDesc desc = getMockTblColStatsDesc(); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = decimalColStatsObjs.get(0); - DecimalColumnStatsData decimalData = obj.getStatsData().getDecimalStats(); - // Add to DB - stats.addToStatsObj(obj); - store.updateTableColumnStatistics(stats); - // Get from DB - ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(DECIMAL_COL)); - // Compare ColumnStatisticsDesc - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); - Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.DECIMAL_STATS, dataFromDB.getSetField()); - // Compare DecimalColumnStatsData - DecimalColumnStatsData decimalDataFromDB = dataFromDB.getDecimalStats(); - Assert.assertEquals(decimalData.getHighValue(), decimalDataFromDB.getHighValue()); - Assert.assertEquals(decimalData.getLowValue(), decimalDataFromDB.getLowValue()); - Assert.assertEquals(decimalData.getNumNulls(), decimalDataFromDB.getNumNulls()); - Assert.assertEquals(decimalData.getNumDVs(), decimalDataFromDB.getNumDVs()); - Assert.assertEquals(decimalData.getBitVectors(), decimalDataFromDB.getBitVectors()); - } - - @Test - public void longPartitionStatistics() throws Exception { - createMockTableAndPartition(INT_TYPE, INT_VAL); - // Add partition stats for: LONG_COL and partition: {PART_KEY, INT_VAL} to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for partition level stats - ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, INT_VAL); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = longColStatsObjs.get(0); - LongColumnStatsData longData = obj.getStatsData().getLongStats(); - // Add to DB - stats.addToStatsObj(obj); - List parVals = new ArrayList(); - parVals.add(INT_VAL); - store.updatePartitionColumnStatistics(stats, parVals); - // Get from DB - List partNames = new ArrayList(); - partNames.add(desc.getPartName()); - List colNames = new ArrayList(); - colNames.add(obj.getColName()); - List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); - // Compare ColumnStatisticsDesc - Assert.assertEquals(1, statsFromDB.size()); - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); - Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.LONG_STATS, dataFromDB.getSetField()); - // Compare LongColumnStatsData - LongColumnStatsData longDataFromDB = dataFromDB.getLongStats(); - Assert.assertEquals(longData.getHighValue(), longDataFromDB.getHighValue()); - Assert.assertEquals(longData.getLowValue(), longDataFromDB.getLowValue()); - Assert.assertEquals(longData.getNumNulls(), longDataFromDB.getNumNulls()); - Assert.assertEquals(longData.getNumDVs(), longDataFromDB.getNumDVs()); - Assert.assertEquals(longData.getBitVectors(), longDataFromDB.getBitVectors()); - } - - @Test - public void doublePartitionStatistics() throws Exception { - createMockTableAndPartition(DOUBLE_TYPE, DOUBLE_VAL); - // Add partition stats for: DOUBLE_COL and partition: {PART_KEY, DOUBLE_VAL} to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for partition level stats - ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, DOUBLE_VAL); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = doubleColStatsObjs.get(0); - DoubleColumnStatsData doubleData = obj.getStatsData().getDoubleStats(); - // Add to DB - stats.addToStatsObj(obj); - List parVals = new ArrayList(); - parVals.add(DOUBLE_VAL); - store.updatePartitionColumnStatistics(stats, parVals); - // Get from DB - List partNames = new ArrayList(); - partNames.add(desc.getPartName()); - List colNames = new ArrayList(); - colNames.add(obj.getColName()); - List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); - // Compare ColumnStatisticsDesc - Assert.assertEquals(1, statsFromDB.size()); - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); - Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.DOUBLE_STATS, dataFromDB.getSetField()); - // Compare DoubleColumnStatsData - DoubleColumnStatsData doubleDataFromDB = dataFromDB.getDoubleStats(); - Assert.assertEquals(doubleData.getHighValue(), doubleDataFromDB.getHighValue(), 0.01); - Assert.assertEquals(doubleData.getLowValue(), doubleDataFromDB.getLowValue(), 0.01); - Assert.assertEquals(doubleData.getNumNulls(), doubleDataFromDB.getNumNulls()); - Assert.assertEquals(doubleData.getNumDVs(), doubleDataFromDB.getNumDVs()); - Assert.assertEquals(doubleData.getBitVectors(), doubleDataFromDB.getBitVectors()); - } - - @Test - public void stringPartitionStatistics() throws Exception { - createMockTableAndPartition(STRING_TYPE, STRING_VAL); - // Add partition stats for: STRING_COL and partition: {PART_KEY, STRING_VAL} to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for partition level stats - ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, STRING_VAL); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = stringColStatsObjs.get(0); - StringColumnStatsData stringData = obj.getStatsData().getStringStats(); - // Add to DB - stats.addToStatsObj(obj); - List parVals = new ArrayList(); - parVals.add(STRING_VAL); - store.updatePartitionColumnStatistics(stats, parVals); - // Get from DB - List partNames = new ArrayList(); - partNames.add(desc.getPartName()); - List colNames = new ArrayList(); - colNames.add(obj.getColName()); - List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); - // Compare ColumnStatisticsDesc - Assert.assertEquals(1, statsFromDB.size()); - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); - Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.STRING_STATS, dataFromDB.getSetField()); - // Compare StringColumnStatsData - StringColumnStatsData stringDataFromDB = dataFromDB.getStringStats(); - Assert.assertEquals(stringData.getMaxColLen(), stringDataFromDB.getMaxColLen()); - Assert.assertEquals(stringData.getAvgColLen(), stringDataFromDB.getAvgColLen(), 0.01); - Assert.assertEquals(stringData.getNumNulls(), stringDataFromDB.getNumNulls()); - Assert.assertEquals(stringData.getNumDVs(), stringDataFromDB.getNumDVs()); - Assert.assertEquals(stringData.getBitVectors(), stringDataFromDB.getBitVectors()); - } - - @Test - public void decimalPartitionStatistics() throws Exception { - createMockTableAndPartition(DECIMAL_TYPE, DECIMAL_VAL); - // Add partition stats for: DECIMAL_COL and partition: {PART_KEY, DECIMAL_VAL} to DB - // Because of the way our mock implementation works we actually need to not create the table - // before we set statistics on it. 
- ColumnStatistics stats = new ColumnStatistics(); - // Get a default ColumnStatisticsDesc for partition level stats - ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, DECIMAL_VAL); - stats.setStatsDesc(desc); - // Get one of the pre-created ColumnStatisticsObj - ColumnStatisticsObj obj = decimalColStatsObjs.get(0); - DecimalColumnStatsData decimalData = obj.getStatsData().getDecimalStats(); - // Add to DB - stats.addToStatsObj(obj); - List parVals = new ArrayList(); - parVals.add(DECIMAL_VAL); - store.updatePartitionColumnStatistics(stats, parVals); - // Get from DB - List partNames = new ArrayList(); - partNames.add(desc.getPartName()); - List colNames = new ArrayList(); - colNames.add(obj.getColName()); - List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); - // Compare ColumnStatisticsDesc - Assert.assertEquals(1, statsFromDB.size()); - Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); - Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); - Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); - // Compare ColumnStatisticsObj - Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); - ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); - ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); - // Compare ColumnStatisticsData - Assert.assertEquals(ColumnStatisticsData._Fields.DECIMAL_STATS, dataFromDB.getSetField()); - // Compare DecimalColumnStatsData - DecimalColumnStatsData decimalDataFromDB = dataFromDB.getDecimalStats(); - Assert.assertEquals(decimalData.getHighValue(), decimalDataFromDB.getHighValue()); - Assert.assertEquals(decimalData.getLowValue(), decimalDataFromDB.getLowValue()); - Assert.assertEquals(decimalData.getNumNulls(), decimalDataFromDB.getNumNulls()); - Assert.assertEquals(decimalData.getNumDVs(), decimalDataFromDB.getNumDVs()); - Assert.assertEquals(decimalData.getBitVectors(), decimalDataFromDB.getBitVectors()); - } - - private Table createMockTable(String name, String type) throws Exception { - List cols = new ArrayList(); - cols.add(new FieldSchema(name, type, "")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - Map params = new HashMap(); - params.put("key", "value"); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, - serde, new ArrayList(), new ArrayList(), params); - int currentTime = (int)(System.currentTimeMillis() / 1000); - Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); - store.createTable(table); - return table; - } - - private Table createMockTableAndPartition(String partType, String partVal) throws Exception { - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", partType, "")); - List vals = new ArrayList(); - vals.add(partVal); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - Map params = new HashMap(); - params.put("key", "value"); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, - serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); - int currentTime = (int)(System.currentTimeMillis() / 1000); - Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); - store.createTable(table); - Partition part = new Partition(vals, DB, 
TBL, currentTime, currentTime, sd, - emptyParameters); - store.addPartition(part); - return table; - } - /** - * Returns a dummy table level ColumnStatisticsDesc with default values - */ - private ColumnStatisticsDesc getMockTblColStatsDesc() { - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); - desc.setLastAnalyzed(DEFAULT_TIME); - desc.setDbName(DB); - desc.setTableName(TBL); - desc.setIsTblLevel(true); - return desc; - } - - /** - * Returns a dummy partition level ColumnStatisticsDesc - */ - private ColumnStatisticsDesc getMockPartColStatsDesc(String partKey, String partVal) { - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); - desc.setLastAnalyzed(DEFAULT_TIME); - desc.setDbName(DB); - desc.setTableName(TBL); - // part1=val1 - desc.setPartName(partKey + PART_KV_SEPARATOR + partVal); - desc.setIsTblLevel(false); - return desc; - } - -} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java deleted file mode 100644 index 4ccb7dd..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java +++ /dev/null @@ -1,365 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.SortedMap; -import java.util.TreeMap; - -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * - */ -public class TestHBaseStoreCached { - private static final Logger LOG = LoggerFactory.getLogger(TestHBaseStoreCached.class.getName()); - static Map emptyParameters = new HashMap(); - - @Rule public ExpectedException thrown = ExpectedException.none(); - @Mock HTableInterface htable; - SortedMap rows = new TreeMap(); - HBaseStore store; - - @Before - public void init() throws IOException { - MockitoAnnotations.initMocks(this); - HiveConf conf = new HiveConf(); - store = MockUtils.init(conf, htable, rows); - } - - @Test - public void createTable() throws Exception { - String tableName = "mytable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - Table t = store.getTable("default", tableName); - Assert.assertEquals(1, t.getSd().getColsSize()); - Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); - Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); - Assert.assertEquals("file:/tmp", t.getSd().getLocation()); - Assert.assertEquals("input", t.getSd().getInputFormat()); - Assert.assertEquals("output", t.getSd().getOutputFormat()); - Assert.assertEquals("me", t.getOwner()); - Assert.assertEquals("default", t.getDbName()); - Assert.assertEquals(tableName, t.getTableName()); - } - - @Test - public void alterTable() throws Exception { - String tableName = "alttable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new 
SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - startTime += 10; - table.setLastAccessTime(startTime); - store.alterTable("default", tableName, table); - - Table t = store.getTable("default", tableName); - Assert.assertEquals(1, t.getSd().getColsSize()); - Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); - Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); - Assert.assertEquals("file:/tmp", t.getSd().getLocation()); - Assert.assertEquals("input", t.getSd().getInputFormat()); - Assert.assertEquals("output", t.getSd().getOutputFormat()); - Assert.assertEquals("me", t.getOwner()); - Assert.assertEquals("default", t.getDbName()); - Assert.assertEquals(tableName, t.getTableName()); - Assert.assertEquals(startTime, t.getLastAccessTime()); - } - - @Test - public void dropTable() throws Exception { - String tableName = "dtable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - Table t = store.getTable("default", tableName); - Assert.assertNotNull(t); - - store.dropTable("default", tableName); - Assert.assertNull(store.getTable("default", tableName)); - } - - @Test - public void createPartition() throws Exception { - String dbName = "default"; - String tableName = "myparttable"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List vals = Arrays.asList("fred"); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=fred"); - Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - - Partition p = store.getPartition(dbName, tableName, vals); - Assert.assertEquals(1, p.getSd().getColsSize()); - Assert.assertEquals("col1", p.getSd().getCols().get(0).getName()); - Assert.assertEquals("int", p.getSd().getCols().get(0).getType()); - Assert.assertEquals("nocomment", p.getSd().getCols().get(0).getComment()); - Assert.assertEquals("serde", p.getSd().getSerdeInfo().getName()); - Assert.assertEquals("seriallib", p.getSd().getSerdeInfo().getSerializationLib()); - 
Assert.assertEquals("file:/tmp/pc=fred", p.getSd().getLocation()); - Assert.assertEquals("input", p.getSd().getInputFormat()); - Assert.assertEquals("output", p.getSd().getOutputFormat()); - Assert.assertEquals(dbName, p.getDbName()); - Assert.assertEquals(tableName, p.getTableName()); - Assert.assertEquals(1, p.getValuesSize()); - Assert.assertEquals("fred", p.getValues().get(0)); - } - - @Test - public void getPartitions() throws Exception { - String dbName = "default"; - String tableName = "manyParts"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); - for (String val : partVals) { - List vals = new ArrayList(); - vals.add(val); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=" + val); - Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - - Partition p = store.getPartition(dbName, tableName, vals); - Assert.assertEquals("file:/tmp/pc=" + val, p.getSd().getLocation()); - } - - List parts = store.getPartitions(dbName, tableName, -1); - Assert.assertEquals(5, parts.size()); - String[] pv = new String[5]; - for (int i = 0; i < 5; i++) pv[i] = parts.get(i).getValues().get(0); - Arrays.sort(pv); - Assert.assertArrayEquals(pv, partVals.toArray(new String[5])); - } - - @Test - public void listGetDropPartitionNames() throws Exception { - String dbName = "default"; - String tableName = "listParts"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List partCols = new ArrayList(); - partCols.add(new FieldSchema("pc", "string", "")); - partCols.add(new FieldSchema("region", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; - for (String[] pv : partVals) { - List vals = new ArrayList(); - for (String v : pv) vals.add(v); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=" + pv[0] + "/region=" + pv[1]); - Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - } - - List names = store.listPartitionNames(dbName, tableName, (short) -1); - Assert.assertEquals(2, names.size()); - String[] resultNames = names.toArray(new String[names.size()]); - Arrays.sort(resultNames); - Assert.assertArrayEquals(resultNames, new String[]{"pc=today/region=north america", - "pc=tomorrow/region=europe"}); - - List parts = store.getPartitionsByNames(dbName, tableName, names); - 
Assert.assertArrayEquals(partVals[0], parts.get(0).getValues().toArray(new String[2])); - Assert.assertArrayEquals(partVals[1], parts.get(1).getValues().toArray(new String[2])); - - store.dropPartitions(dbName, tableName, names); - List<Partition> afterDropParts = store.getPartitions(dbName, tableName, -1); - Assert.assertEquals(0, afterDropParts.size()); - } - - - @Test - public void dropPartition() throws Exception { - String dbName = "default"; - String tableName = "myparttable2"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List<FieldSchema> cols = new ArrayList<FieldSchema>(); - cols.add(new FieldSchema("col1", "int", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - List<FieldSchema> partCols = new ArrayList<FieldSchema>(); - partCols.add(new FieldSchema("pc", "string", "")); - Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); - store.createTable(table); - - List<String> vals = Arrays.asList("fred"); - StorageDescriptor psd = new StorageDescriptor(sd); - psd.setLocation("file:/tmp/pc=fred"); - Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, - emptyParameters); - store.addPartition(part); - - Assert.assertNotNull(store.getPartition(dbName, tableName, vals)); - store.dropPartition(dbName, tableName, vals); - thrown.expect(NoSuchObjectException.class); - store.getPartition(dbName, tableName, vals); - } - - // Due to the way our mock stuff works, we can only insert one column at a time, so we'll test - // each stat type separately. We'll test them together in the integration tests. - @Test - public void booleanTableStatistics() throws Exception { - long now = System.currentTimeMillis(); - String dbname = "default"; - String tableName = "statstable"; - String boolcol = "boolcol"; - int startTime = (int)(System.currentTimeMillis() / 1000); - List<FieldSchema> cols = new ArrayList<FieldSchema>(); - cols.add(new FieldSchema(boolcol, "boolean", "nocomment")); - SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, emptyParameters); - Table table = new Table(tableName, dbname, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); - store.createTable(table); - - long trues = 37; - long falses = 12; - long booleanNulls = 2; - - ColumnStatistics stats = new ColumnStatistics(); - ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); - desc.setLastAnalyzed(now); - desc.setDbName(dbname); - desc.setTableName(tableName); - desc.setIsTblLevel(true); - stats.setStatsDesc(desc); - - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - obj.setColName(boolcol); - obj.setColType("boolean"); - ColumnStatisticsData data = new ColumnStatisticsData(); - BooleanColumnStatsData boolData = new BooleanColumnStatsData(); - boolData.setNumTrues(trues); - boolData.setNumFalses(falses); - boolData.setNumNulls(booleanNulls); - data.setBooleanStats(boolData); - obj.setStatsData(data); - stats.addToStatsObj(obj); - - store.updateTableColumnStatistics(stats); - - stats = store.getTableColumnStatistics(dbname, tableName, Arrays.asList(boolcol)); - Assert.assertEquals(now, stats.getStatsDesc().getLastAnalyzed()); - Assert.assertEquals(dbname, stats.getStatsDesc().getDbName()); - Assert.assertEquals(tableName, stats.getStatsDesc().getTableName()); - 
Assert.assertTrue(stats.getStatsDesc().isIsTblLevel()); - - Assert.assertEquals(1, stats.getStatsObjSize()); - ColumnStatisticsData colData = obj.getStatsData(); - Assert.assertEquals(ColumnStatisticsData._Fields.BOOLEAN_STATS, colData.getSetField()); - boolData = colData.getBooleanStats(); - Assert.assertEquals(trues, boolData.getNumTrues()); - Assert.assertEquals(falses, boolData.getNumFalses()); - Assert.assertEquals(booleanNulls, boolData.getNumNulls()); - } - - -} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java deleted file mode 100644 index bea0b34..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java +++ /dev/null @@ -1,152 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import java.util.Iterator; -import java.util.List; - -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.junit.Assert; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -/** - * - */ -public class TestSharedStorageDescriptor { - private static final Logger LOG = LoggerFactory.getLogger(TestHBaseStore.class.getName()); - - - @Test - public void changeOnSerde() { - StorageDescriptor sd = new StorageDescriptor(); - SerDeInfo serde = new SerDeInfo(); - serde.setName("serde"); - sd.setSerdeInfo(serde); - SharedStorageDescriptor ssd = new SharedStorageDescriptor(); - ssd.setShared(sd); - ssd.getSerdeInfo().setName("different"); - Assert.assertFalse(sd.getSerdeInfo() == ssd.getSerdeInfo()); - Assert.assertEquals("serde", serde.getName()); - Assert.assertEquals("different", ssd.getSerdeInfo().getName()); - Assert.assertEquals("serde", sd.getSerdeInfo().getName()); - } - - @Test - public void changeOnSkewed() { - StorageDescriptor sd = new StorageDescriptor(); - SkewedInfo skew = new SkewedInfo(); - sd.setSkewedInfo(skew); - SharedStorageDescriptor ssd = new SharedStorageDescriptor(); - ssd.setShared(sd); - ssd.setSkewedInfo(new SkewedInfo()); - Assert.assertFalse(sd.getSkewedInfo() == ssd.getSkewedInfo()); - } - - @Test - public void changeOnUnset() { - StorageDescriptor sd = new StorageDescriptor(); - SkewedInfo skew = new SkewedInfo(); - sd.setSkewedInfo(skew); - SharedStorageDescriptor ssd = new SharedStorageDescriptor(); - ssd.setShared(sd); - ssd.unsetSkewedInfo(); - Assert.assertFalse(sd.getSkewedInfo() == ssd.getSkewedInfo()); - } 
- - @Test - public void changeOrder() { - StorageDescriptor sd = new StorageDescriptor(); - sd.addToSortCols(new Order("fred", 1)); - SharedStorageDescriptor ssd = new SharedStorageDescriptor(); - ssd.setShared(sd); - ssd.getSortCols().get(0).setOrder(2); - Assert.assertFalse(sd.getSortCols() == ssd.getSortCols()); - Assert.assertEquals(2, ssd.getSortCols().get(0).getOrder()); - Assert.assertEquals(1, sd.getSortCols().get(0).getOrder()); - } - - @Test - public void unsetOrder() { - StorageDescriptor sd = new StorageDescriptor(); - sd.addToSortCols(new Order("fred", 1)); - SharedStorageDescriptor ssd = new SharedStorageDescriptor(); - ssd.setShared(sd); - ssd.unsetSortCols(); - Assert.assertFalse(sd.getSortCols() == ssd.getSortCols()); - Assert.assertEquals(0, ssd.getSortColsSize()); - Assert.assertEquals(1, sd.getSortColsSize()); - } - - @Test - public void changeBucketList() { - StorageDescriptor sd = new StorageDescriptor(); - sd.addToBucketCols(new String("fred")); - SharedStorageDescriptor ssd = new SharedStorageDescriptor(); - ssd.setShared(sd); - List<String> list = ssd.getBucketCols(); - list.add(new String("bob")); - Assert.assertFalse(sd.getBucketCols() == ssd.getBucketCols()); - Assert.assertEquals(2, ssd.getBucketColsSize()); - Assert.assertEquals(1, sd.getBucketColsSize()); - } - - @Test - public void addToColList() { - StorageDescriptor sd = new StorageDescriptor(); - sd.addToCols(new FieldSchema("fred", "string", "")); - SharedStorageDescriptor ssd = new SharedStorageDescriptor(); - ssd.setShared(sd); - ssd.addToCols(new FieldSchema("joe", "int", "")); - Assert.assertFalse(sd.getCols() == ssd.getCols()); - Assert.assertEquals(2, ssd.getColsSize()); - Assert.assertEquals(1, sd.getColsSize()); - } - - @Test - public void colIterator() { - StorageDescriptor sd = new StorageDescriptor(); - sd.addToCols(new FieldSchema("fred", "string", "")); - SharedStorageDescriptor ssd = new SharedStorageDescriptor(); - ssd.setShared(sd); - Iterator<FieldSchema> iter = ssd.getColsIterator(); - Assert.assertTrue(iter.hasNext()); - Assert.assertEquals("fred", iter.next().getName()); - Assert.assertFalse(sd.getCols() == ssd.getCols()); - } - - @Test - public void setReadOnly() { - StorageDescriptor sd = new StorageDescriptor(); - sd.addToCols(new FieldSchema("fred", "string", "")); - SharedStorageDescriptor ssd = new SharedStorageDescriptor(); - ssd.setShared(sd); - ssd.setReadOnly(); - List<FieldSchema> cols = ssd.getCols(); - Assert.assertEquals(1, cols.size()); - Assert.assertTrue(sd.getCols() == ssd.getCols()); - } - -}