From e6f00698cae7dc45487bffd38d0cdbb8076249d6 Mon Sep 17 00:00:00 2001 From: Alan Gates Date: Tue, 14 Jul 2015 11:50:15 -0700 Subject: [PATCH] HIVE-11294 Add caching of aggregated stats in HBase. --- .../java/org/apache/hadoop/hive/conf/HiveConf.java | 43 +- .../hbase/TestHBaseAggrStatsCacheIntegration.java | 499 +++ .../hive/metastore/hbase/HbaseMetastoreProto.java | 4189 +++++++++++++++++++- .../hbase/AggrStatsInvalidatorFilter.java | 136 + .../hadoop/hive/metastore/hbase/Counter.java | 6 + .../hive/metastore/hbase/HBaseReadWrite.java | 316 +- .../hadoop/hive/metastore/hbase/HBaseStore.java | 47 +- .../hadoop/hive/metastore/hbase/HBaseUtils.java | 81 +- .../hadoop/hive/metastore/hbase/StatsCache.java | 325 ++ .../hbase/stats/ColumnStatsAggregatorFactory.java | 51 + .../metastore/hbase/hbase_metastore_proto.proto | 30 + .../hbase/TestHBaseAggregateStatsCache.java | 316 ++ .../hive/metastore/hbase/TestHBaseStore.java | 2 +- 13 files changed, 5705 insertions(+), 336 deletions(-) create mode 100644 itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java create mode 100644 metastore/src/java/org/apache/hadoop/hive/metastore/hbase/AggrStatsInvalidatorFilter.java create mode 100644 metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java create mode 100644 metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 5eb11c2..7677431 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -18,25 +18,7 @@ package org.apache.hadoop.hive.conf; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.PrintStream; -import java.net.URL; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Properties; -import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import javax.security.auth.login.LoginException; - +import com.google.common.base.Joiner; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -54,7 +36,23 @@ import org.apache.hadoop.util.Shell; import org.apache.hive.common.HiveCompat; -import com.google.common.base.Joiner; +import javax.security.auth.login.LoginException; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintStream; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; /** * Hive Configuration. 
@@ -417,6 +415,11 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
     METASTORE_HBASE_CONNECTION_CLASS("hive.metastore.hbase.connection.class",
         "org.apache.hadoop.hive.metastore.hbase.VanillaHBaseConnection",
         "Class used to connection to HBase"),
+    METASTORE_HBASE_STATS_CACHE_OBJECTS("hive.metastore.hbase.stats.cache.objects",
+        10000, "How many aggregated stats objects to cache in memory"),
+    METASTORE_HBASE_STATS_CACHE_TTL("hive.metastore.hbase.stats.cache.ttl", "60s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "Number of seconds stats objects live in memory after they are read from HBase."),
     METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3,
         "Number of retries while opening a connection to metastore"),
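The two settings added above size and age out the new aggregated stats cache. As a quick illustration (not part of the patch; the class name below is hypothetical), this is roughly how a caller could read them, assuming the usual HiveConf accessors getIntVar and getTimeVar:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hive.conf.HiveConf;

    // Hypothetical example class; only the two ConfVars come from this patch.
    public class StatsCacheConfExample {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Upper bound on the number of aggregated stats objects held in memory (default 10000).
        int maxObjects = conf.getIntVar(HiveConf.ConfVars.METASTORE_HBASE_STATS_CACHE_OBJECTS);
        // How long an entry stays in memory after being read from HBase, in seconds (default 60s).
        long ttlSeconds =
            conf.getTimeVar(HiveConf.ConfVars.METASTORE_HBASE_STATS_CACHE_TTL, TimeUnit.SECONDS);
        System.out.println("stats cache: max objects=" + maxObjects + ", ttl=" + ttlSeconds + "s");
      }
    }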
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java
new file mode 100644
index 0000000..7e6a2ef
--- /dev/null
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java
@@ -0,0 +1,499 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Integration tests, against an HBase mini-cluster, for the aggregated stats cache in HBaseStore.
+ */
+public class TestHBaseAggrStatsCacheIntegration extends HBaseIntegrationTests {
+
+  private static final Log LOG =
+      LogFactory.getLog(TestHBaseAggrStatsCacheIntegration.class.getName());
+
+  @Rule public ExpectedException thrown = ExpectedException.none();
+
+  @BeforeClass
+  public static void startup() throws Exception {
+    HBaseIntegrationTests.startMiniCluster();
+  }
+
+  @AfterClass
+  public static void shutdown() throws Exception {
+    HBaseIntegrationTests.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setup() throws IOException {
+    setupConnection();
+    setupHBaseStore();
+    store.backdoor().getStatsCache().resetCounters();
+  }
+
+  private static interface Checker {
+    void checkStats(AggrStats aggrStats) throws Exception;
+  }
+
+  @Test
+  public void hit() throws Exception {
+    String dbName = "default";
+    String tableName = "hit";
+    List<String> partVals1 = Arrays.asList("today");
+    List<String> partVals2 = Arrays.asList("yesterday");
+    long now = System.currentTimeMillis();
+
+    List<FieldSchema> cols = new ArrayList<>();
+    cols.add(new FieldSchema("col1", "boolean", "nocomment"));
+    cols.add(new FieldSchema("col2", "varchar", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, Collections.<String, String>emptyMap());
+    List<FieldSchema> partCols = new ArrayList<>();
+    partCols.add(new FieldSchema("ds", "string", ""));
+    Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols,
+        Collections.<String, String>emptyMap(), null, null, null);
+    store.createTable(table);
+
+    for (List<String> partVals : Arrays.asList(partVals1, partVals2)) {
+      StorageDescriptor psd = new StorageDescriptor(sd);
+      psd.setLocation("file:/tmp/default/hit/ds=" + partVals.get(0));
+      Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd,
+          Collections.<String, String>emptyMap());
+      store.addPartition(part);
+
+      ColumnStatistics cs = new ColumnStatistics();
+      ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName);
+      desc.setLastAnalyzed(now);
+      desc.setPartName("ds=" + partVals.get(0));
+      cs.setStatsDesc(desc);
+      ColumnStatisticsObj obj = new
ColumnStatisticsObj(); + obj.setColName("col1"); + obj.setColType("boolean"); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); + bcsd.setNumFalses(10); + bcsd.setNumTrues(20); + bcsd.setNumNulls(30); + data.setBooleanStats(bcsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + obj = new ColumnStatisticsObj(); + obj.setColName("col2"); + obj.setColType("varchar"); + data = new ColumnStatisticsData(); + StringColumnStatsData scsd = new StringColumnStatsData(); + scsd.setAvgColLen(10.3); + scsd.setMaxColLen(2000); + scsd.setNumNulls(3); + scsd.setNumDVs(12342); + data.setStringStats(scsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + store.updatePartitionColumnStatistics(cs, partVals); + } + + Checker statChecker = new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(4, aggrStats.getPartsFound()); + Assert.assertEquals(2, aggrStats.getColStatsSize()); + ColumnStatisticsObj cso = aggrStats.getColStats().get(0); + Assert.assertEquals("col1", cso.getColName()); + Assert.assertEquals("boolean", cso.getColType()); + BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); + Assert.assertEquals(20, bcsd.getNumFalses()); + Assert.assertEquals(40, bcsd.getNumTrues()); + Assert.assertEquals(60, bcsd.getNumNulls()); + + cso = aggrStats.getColStats().get(1); + Assert.assertEquals("col2", cso.getColName()); + Assert.assertEquals("string", cso.getColType()); + StringColumnStatsData scsd = cso.getStatsData().getStringStats(); + Assert.assertEquals(10.3, scsd.getAvgColLen(), 0.1); + Assert.assertEquals(2000, scsd.getMaxColLen()); + Assert.assertEquals(6, scsd.getNumNulls()); + Assert.assertEquals(12342, scsd.getNumDVs()); + } + }; + + AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1", "col2")); + statChecker.checkStats(aggrStats); + + // Check that we had to build it from the stats + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + + // Call again, this time it should come from memory. Also, reverse the name order this time + // to assure that we still hit. 
+ aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1", "col2")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(4, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + + store.backdoor().getStatsCache().flushMemory(); + // Call again, this time it should come from hbase + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1", "col2")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(2, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(6, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + } + + @Test + public void someWithStats() throws Exception { + String dbName = "default"; + String tableName = "psws"; + List partVals1 = Arrays.asList("today"); + List partVals2 = Arrays.asList("yesterday"); + long now = System.currentTimeMillis(); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "long", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, Collections.emptyMap()); + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("ds", "string", "")); + Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, + Collections.emptyMap(), null, null, null); + store.createTable(table); + + boolean first = true; + for (List partVals : Arrays.asList(partVals1, partVals2)) { + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/default/psws/ds=" + partVals.get(0)); + Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, + Collections.emptyMap()); + store.addPartition(part); + + if (first) { + ColumnStatistics cs = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); + desc.setLastAnalyzed(now); + desc.setPartName("ds=" + partVals.get(0)); + cs.setStatsDesc(desc); + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName("col1"); + obj.setColType("long"); + ColumnStatisticsData data = new ColumnStatisticsData(); + LongColumnStatsData lcsd = new LongColumnStatsData(); + lcsd.setHighValue(192L); + lcsd.setLowValue(-20L); + lcsd.setNumNulls(30); + lcsd.setNumDVs(32); + data.setLongStats(lcsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + store.updatePartitionColumnStatistics(cs, partVals); + first = false; + } + } + + Checker statChecker = new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(1, aggrStats.getPartsFound()); + Assert.assertEquals(1, aggrStats.getColStatsSize()); + ColumnStatisticsObj cso = aggrStats.getColStats().get(0); + Assert.assertEquals("col1", cso.getColName()); + Assert.assertEquals("long", cso.getColType()); + LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); + Assert.assertEquals(192L, lcsd.getHighValue()); + Assert.assertEquals(-20L, lcsd.getLowValue()); + Assert.assertEquals(30, lcsd.getNumNulls()); + Assert.assertEquals(32, lcsd.getNumDVs()); + } + }; + + AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", 
"ds=yesterday"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + // Check that we had to build it from the stats + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + + // Call again, this time it should come from memory. Also, reverse the name order this time + // to assure that we still hit. + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + + store.backdoor().getStatsCache().flushMemory(); + // Call again, this time it should come from hbase + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(1, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + } + + @Test + public void invalidation() throws Exception { + try { + String dbName = "default"; + String tableName = "invalidation"; + List partVals1 = Arrays.asList("today"); + List partVals2 = Arrays.asList("yesterday"); + List partVals3 = Arrays.asList("tomorrow"); + long now = System.currentTimeMillis(); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "boolean", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, Collections.emptyMap()); + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("ds", "string", "")); + Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, + Collections.emptyMap(), null, null, null); + store.createTable(table); + + for (List partVals : Arrays.asList(partVals1, partVals2, partVals3)) { + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/default/invalidation/ds=" + partVals.get(0)); + Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, + Collections.emptyMap()); + store.addPartition(part); + + ColumnStatistics cs = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); + desc.setLastAnalyzed(now); + desc.setPartName("ds=" + partVals.get(0)); + cs.setStatsDesc(desc); + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName("col1"); + obj.setColType("boolean"); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); + bcsd.setNumFalses(10); + bcsd.setNumTrues(20); + bcsd.setNumNulls(30); + data.setBooleanStats(bcsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + store.updatePartitionColumnStatistics(cs, partVals); + } + + Checker statChecker = new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(2, aggrStats.getPartsFound()); + Assert.assertEquals(1, aggrStats.getColStatsSize()); + ColumnStatisticsObj cso = 
aggrStats.getColStats().get(0); + Assert.assertEquals("col1", cso.getColName()); + Assert.assertEquals("boolean", cso.getColType()); + BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); + Assert.assertEquals(20, bcsd.getNumFalses()); + Assert.assertEquals(40, bcsd.getNumTrues()); + Assert.assertEquals(60, bcsd.getNumNulls()); + } + }; + + AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + // Check that we had to build it from the stats + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + + // Call again, this time it should come from memory. Also, reverse the name order this time + // to assure that we still hit. + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + + // Now call a different combination to get it in memory too + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(4, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + + // wake the invalidator and check again to make sure it isn't too aggressive about + // removing our stuff. 
+ store.backdoor().getStatsCache().wakeInvalidator(); + + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(5, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + + // Update statistics for 'tomorrow' + ColumnStatistics cs = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); + desc.setLastAnalyzed(now); + desc.setPartName("ds=" + partVals3.get(0)); + cs.setStatsDesc(desc); + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName("col1"); + obj.setColType("boolean"); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); + bcsd.setNumFalses(100); + bcsd.setNumTrues(200); + bcsd.setNumNulls(300); + data.setBooleanStats(bcsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + Checker afterUpdate = new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(2, aggrStats.getPartsFound()); + Assert.assertEquals(1, aggrStats.getColStatsSize()); + ColumnStatisticsObj cso = aggrStats.getColStats().get(0); + Assert.assertEquals("col1", cso.getColName()); + Assert.assertEquals("boolean", cso.getColType()); + BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); + Assert.assertEquals(110, bcsd.getNumFalses()); + Assert.assertEquals(220, bcsd.getNumTrues()); + Assert.assertEquals(330, bcsd.getNumNulls()); + } + }; + + store.updatePartitionColumnStatistics(cs, partVals3); + + store.backdoor().getStatsCache().setRunInvalidatorEvery(100); + store.backdoor().getStatsCache().wakeInvalidator(); + + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); + afterUpdate.checkStats(aggrStats); + + // Check that we missed, which means this aggregate was dropped from the cache. + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(6, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().misses.getCnt()); + + // Check that our other aggregate is still in the cache. 
+ aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(7, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().misses.getCnt()); + + // Drop 'yesterday', so our first aggregate should be dumped from memory and hbase + store.dropPartition(dbName, tableName, partVals2); + + store.backdoor().getStatsCache().wakeInvalidator(); + + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); + new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(1, aggrStats.getPartsFound()); + Assert.assertEquals(1, aggrStats.getColStatsSize()); + ColumnStatisticsObj cso = aggrStats.getColStats().get(0); + Assert.assertEquals("col1", cso.getColName()); + Assert.assertEquals("boolean", cso.getColType()); + BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); + Assert.assertEquals(10, bcsd.getNumFalses()); + Assert.assertEquals(20, bcsd.getNumTrues()); + Assert.assertEquals(30, bcsd.getNumNulls()); + } + }.checkStats(aggrStats); + + // Check that we missed, which means this aggregate was dropped from the cache. + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(8, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(4, store.backdoor().getStatsCache().misses.getCnt()); + + // Check that our other aggregate is still in the cache. + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); + afterUpdate.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(9, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(4, store.backdoor().getStatsCache().misses.getCnt()); + } finally { + store.backdoor().getStatsCache().setRunInvalidatorEvery(5000); + store.backdoor().getStatsCache().setMaxTimeInCache(500000); + store.backdoor().getStatsCache().wakeInvalidator(); + } + } +} diff --git metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java index 2d9e592..314fc7f 100644 --- metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java +++ metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java @@ -90,6 +90,3688 @@ private PrincipalType(int index, int value) { // @@protoc_insertion_point(enum_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalType) } + public interface AggrStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required int64 parts_found = 1; + /** + * required int64 parts_found = 1; + */ + boolean hasPartsFound(); + /** + * required int64 parts_found = 1; + */ + long getPartsFound(); + + // repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + java.util.List + getColStatsList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getColStats(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + int getColStatsCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + java.util.List + getColStatsOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder getColStatsOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStats} + */ + public static final class AggrStats extends + com.google.protobuf.GeneratedMessage + implements AggrStatsOrBuilder { + // Use AggrStats.newBuilder() to construct. + private AggrStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AggrStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AggrStats defaultInstance; + public static AggrStats getDefaultInstance() { + return defaultInstance; + } + + public AggrStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AggrStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + partsFound_ = input.readInt64(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + colStats_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + colStats_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + colStats_ = java.util.Collections.unmodifiableList(colStats_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AggrStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AggrStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required int64 parts_found = 1; + public static final int PARTS_FOUND_FIELD_NUMBER = 1; + private long partsFound_; + /** + * required int64 parts_found = 1; + */ + public boolean hasPartsFound() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int64 parts_found = 1; + */ + public long getPartsFound() { + return partsFound_; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + public static final int COL_STATS_FIELD_NUMBER = 2; + private java.util.List colStats_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public java.util.List getColStatsList() { + return colStats_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public java.util.List + getColStatsOrBuilderList() { + return colStats_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public int getColStatsCount() { + return colStats_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getColStats(int index) { + return colStats_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder getColStatsOrBuilder( + int index) { + return colStats_.get(index); + } + + private void initFields() { + partsFound_ = 0L; + colStats_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasPartsFound()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getColStatsCount(); i++) { + if (!getColStats(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, partsFound_); + } + for (int i = 0; i < colStats_.size(); i++) { + output.writeMessage(2, colStats_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, partsFound_); + } + for (int i = 0; i < colStats_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, colStats_.get(i)); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getColStatsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + partsFound_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + if (colStatsBuilder_ == null) { + colStats_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + colStatsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.partsFound_ = partsFound_; + if (colStatsBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + colStats_ = java.util.Collections.unmodifiableList(colStats_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.colStats_ = colStats_; + } else { + result.colStats_ = colStatsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public 
Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.getDefaultInstance()) return this; + if (other.hasPartsFound()) { + setPartsFound(other.getPartsFound()); + } + if (colStatsBuilder_ == null) { + if (!other.colStats_.isEmpty()) { + if (colStats_.isEmpty()) { + colStats_ = other.colStats_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureColStatsIsMutable(); + colStats_.addAll(other.colStats_); + } + onChanged(); + } + } else { + if (!other.colStats_.isEmpty()) { + if (colStatsBuilder_.isEmpty()) { + colStatsBuilder_.dispose(); + colStatsBuilder_ = null; + colStats_ = other.colStats_; + bitField0_ = (bitField0_ & ~0x00000002); + colStatsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getColStatsFieldBuilder() : null; + } else { + colStatsBuilder_.addAllMessages(other.colStats_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasPartsFound()) { + + return false; + } + for (int i = 0; i < getColStatsCount(); i++) { + if (!getColStats(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required int64 parts_found = 1; + private long partsFound_ ; + /** + * required int64 parts_found = 1; + */ + public boolean hasPartsFound() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int64 parts_found = 1; + */ + public long getPartsFound() { + return partsFound_; + } + /** + * required int64 parts_found = 1; + */ + public Builder setPartsFound(long value) { + bitField0_ |= 0x00000001; + partsFound_ = value; + onChanged(); + return this; + } + /** + * required int64 parts_found = 1; + */ + public Builder clearPartsFound() { + bitField0_ = (bitField0_ & ~0x00000001); + partsFound_ = 0L; + onChanged(); + return this; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + private java.util.List colStats_ = + java.util.Collections.emptyList(); + private void ensureColStatsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + colStats_ = new java.util.ArrayList(colStats_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder> colStatsBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public java.util.List getColStatsList() { + if (colStatsBuilder_ == null) { + return java.util.Collections.unmodifiableList(colStats_); + } else { + return colStatsBuilder_.getMessageList(); + } + } + /** + * 
repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public int getColStatsCount() { + if (colStatsBuilder_ == null) { + return colStats_.size(); + } else { + return colStatsBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getColStats(int index) { + if (colStatsBuilder_ == null) { + return colStats_.get(index); + } else { + return colStatsBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder setColStats( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats value) { + if (colStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColStatsIsMutable(); + colStats_.set(index, value); + onChanged(); + } else { + colStatsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder setColStats( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder builderForValue) { + if (colStatsBuilder_ == null) { + ensureColStatsIsMutable(); + colStats_.set(index, builderForValue.build()); + onChanged(); + } else { + colStatsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder addColStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats value) { + if (colStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColStatsIsMutable(); + colStats_.add(value); + onChanged(); + } else { + colStatsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder addColStats( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats value) { + if (colStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColStatsIsMutable(); + colStats_.add(index, value); + onChanged(); + } else { + colStatsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder addColStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder builderForValue) { + if (colStatsBuilder_ == null) { + ensureColStatsIsMutable(); + colStats_.add(builderForValue.build()); + onChanged(); + } else { + colStatsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder addColStats( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder builderForValue) { + if (colStatsBuilder_ == null) { + ensureColStatsIsMutable(); + colStats_.add(index, builderForValue.build()); + onChanged(); + } else { + colStatsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder addAllColStats( + java.lang.Iterable values) { + if (colStatsBuilder_ == null) { + ensureColStatsIsMutable(); + super.addAll(values, colStats_); + onChanged(); + } else { + 
colStatsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder clearColStats() { + if (colStatsBuilder_ == null) { + colStats_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + colStatsBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder removeColStats(int index) { + if (colStatsBuilder_ == null) { + ensureColStatsIsMutable(); + colStats_.remove(index); + onChanged(); + } else { + colStatsBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder getColStatsBuilder( + int index) { + return getColStatsFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder getColStatsOrBuilder( + int index) { + if (colStatsBuilder_ == null) { + return colStats_.get(index); } else { + return colStatsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public java.util.List + getColStatsOrBuilderList() { + if (colStatsBuilder_ != null) { + return colStatsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(colStats_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder addColStatsBuilder() { + return getColStatsFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder addColStatsBuilder( + int index) { + return getColStatsFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public java.util.List + getColStatsBuilderList() { + return getColStatsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder> + getColStatsFieldBuilder() { + if (colStatsBuilder_ == null) { + colStatsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder>( + colStats_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + colStats_ = null; + } + return colStatsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStats) + } + + static { + defaultInstance = new AggrStats(true); + defaultInstance.initFields(); + } 
+ + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStats) + } + + public interface AggrStatsBloomFilterOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes db_name = 1; + /** + * required bytes db_name = 1; + */ + boolean hasDbName(); + /** + * required bytes db_name = 1; + */ + com.google.protobuf.ByteString getDbName(); + + // required bytes table_name = 2; + /** + * required bytes table_name = 2; + */ + boolean hasTableName(); + /** + * required bytes table_name = 2; + */ + com.google.protobuf.ByteString getTableName(); + + // required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + boolean hasBloomFilter(); + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getBloomFilter(); + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder getBloomFilterOrBuilder(); + + // required int64 aggregated_at = 4; + /** + * required int64 aggregated_at = 4; + */ + boolean hasAggregatedAt(); + /** + * required int64 aggregated_at = 4; + */ + long getAggregatedAt(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter} + */ + public static final class AggrStatsBloomFilter extends + com.google.protobuf.GeneratedMessage + implements AggrStatsBloomFilterOrBuilder { + // Use AggrStatsBloomFilter.newBuilder() to construct. 
+ private AggrStatsBloomFilter(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AggrStatsBloomFilter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AggrStatsBloomFilter defaultInstance; + public static AggrStatsBloomFilter getDefaultInstance() { + return defaultInstance; + } + + public AggrStatsBloomFilter getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AggrStatsBloomFilter( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + dbName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + tableName_ = input.readBytes(); + break; + } + case 26: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = bloomFilter_.toBuilder(); + } + bloomFilter_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(bloomFilter_); + bloomFilter_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 32: { + bitField0_ |= 0x00000008; + aggregatedAt_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AggrStatsBloomFilter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return new AggrStatsBloomFilter(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface BloomFilterOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required int32 num_bits = 1; + /** + * required int32 num_bits = 1; + */ + boolean hasNumBits(); + /** + * required int32 num_bits = 1; + */ + int getNumBits(); + + // required int32 num_funcs = 2; + /** + * required int32 num_funcs = 2; + */ + boolean hasNumFuncs(); + /** + * required int32 num_funcs = 2; + */ + int getNumFuncs(); + + // repeated int64 bits = 3; + /** + * repeated int64 bits = 3; + */ + java.util.List getBitsList(); + /** + * repeated int64 bits = 3; + */ + int getBitsCount(); + /** + * repeated int64 bits = 3; + */ + long getBits(int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter} + */ + public static final class BloomFilter extends + com.google.protobuf.GeneratedMessage + implements BloomFilterOrBuilder { + // Use BloomFilter.newBuilder() to construct. + private BloomFilter(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BloomFilter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BloomFilter defaultInstance; + public static BloomFilter getDefaultInstance() { + return defaultInstance; + } + + public BloomFilter getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BloomFilter( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + numBits_ = input.readInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + numFuncs_ = input.readInt32(); + break; + } + case 24: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + bits_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + bits_.add(input.readInt64()); + break; + } + case 26: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) { + bits_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + while (input.getBytesUntilLimit() > 0) { + bits_.add(input.readInt64()); + } + input.popLimit(limit); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + bits_ 
= java.util.Collections.unmodifiableList(bits_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BloomFilter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BloomFilter(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required int32 num_bits = 1; + public static final int NUM_BITS_FIELD_NUMBER = 1; + private int numBits_; + /** + * required int32 num_bits = 1; + */ + public boolean hasNumBits() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 num_bits = 1; + */ + public int getNumBits() { + return numBits_; + } + + // required int32 num_funcs = 2; + public static final int NUM_FUNCS_FIELD_NUMBER = 2; + private int numFuncs_; + /** + * required int32 num_funcs = 2; + */ + public boolean hasNumFuncs() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int32 num_funcs = 2; + */ + public int getNumFuncs() { + return numFuncs_; + } + + // repeated int64 bits = 3; + public static final int BITS_FIELD_NUMBER = 3; + private java.util.List bits_; + /** + * repeated int64 bits = 3; + */ + public java.util.List + getBitsList() { + return bits_; + } + /** + * repeated int64 bits = 3; + */ + public int getBitsCount() { + return bits_.size(); + } + /** + * repeated int64 bits = 3; + */ + public long getBits(int index) { + return bits_.get(index); + } + + private void initFields() { + numBits_ = 0; + numFuncs_ = 0; + bits_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNumBits()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasNumFuncs()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt32(1, numBits_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt32(2, numFuncs_); + } + for (int i = 0; i < bits_.size(); i++) { + output.writeInt64(3, bits_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = 
memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, numBits_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, numFuncs_); + } + { + int dataSize = 0; + for (int i = 0; i < bits_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeInt64SizeNoTag(bits_.get(i)); + } + size += dataSize; + size += 1 * getBitsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + numBits_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + numFuncs_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + bits_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.numBits_ = numBits_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.numFuncs_ = numFuncs_; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + bits_ = java.util.Collections.unmodifiableList(bits_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.bits_ = bits_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance()) return this; + if (other.hasNumBits()) { + setNumBits(other.getNumBits()); + } + if (other.hasNumFuncs()) { + setNumFuncs(other.getNumFuncs()); + } + if (!other.bits_.isEmpty()) { + if (bits_.isEmpty()) { + bits_ = other.bits_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureBitsIsMutable(); + bits_.addAll(other.bits_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNumBits()) { + + return false; + } + if (!hasNumFuncs()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required int32 num_bits = 1; + private int numBits_ ; + /** + * required int32 num_bits = 1; + */ + public boolean hasNumBits() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 num_bits = 1; + */ + public int getNumBits() { + return numBits_; + } + /** + * required int32 num_bits = 1; + */ + public Builder setNumBits(int value) { + bitField0_ |= 0x00000001; + numBits_ = value; + onChanged(); + return this; + } + /** + * required int32 num_bits = 1; + */ + public Builder clearNumBits() { + bitField0_ = (bitField0_ & ~0x00000001); + numBits_ = 0; + onChanged(); + return this; + } + + // required int32 num_funcs = 2; + private int numFuncs_ ; + /** + * required int32 num_funcs = 2; + */ + public boolean hasNumFuncs() { + 
return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int32 num_funcs = 2; + */ + public int getNumFuncs() { + return numFuncs_; + } + /** + * required int32 num_funcs = 2; + */ + public Builder setNumFuncs(int value) { + bitField0_ |= 0x00000002; + numFuncs_ = value; + onChanged(); + return this; + } + /** + * required int32 num_funcs = 2; + */ + public Builder clearNumFuncs() { + bitField0_ = (bitField0_ & ~0x00000002); + numFuncs_ = 0; + onChanged(); + return this; + } + + // repeated int64 bits = 3; + private java.util.List bits_ = java.util.Collections.emptyList(); + private void ensureBitsIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + bits_ = new java.util.ArrayList(bits_); + bitField0_ |= 0x00000004; + } + } + /** + * repeated int64 bits = 3; + */ + public java.util.List + getBitsList() { + return java.util.Collections.unmodifiableList(bits_); + } + /** + * repeated int64 bits = 3; + */ + public int getBitsCount() { + return bits_.size(); + } + /** + * repeated int64 bits = 3; + */ + public long getBits(int index) { + return bits_.get(index); + } + /** + * repeated int64 bits = 3; + */ + public Builder setBits( + int index, long value) { + ensureBitsIsMutable(); + bits_.set(index, value); + onChanged(); + return this; + } + /** + * repeated int64 bits = 3; + */ + public Builder addBits(long value) { + ensureBitsIsMutable(); + bits_.add(value); + onChanged(); + return this; + } + /** + * repeated int64 bits = 3; + */ + public Builder addAllBits( + java.lang.Iterable values) { + ensureBitsIsMutable(); + super.addAll(values, bits_); + onChanged(); + return this; + } + /** + * repeated int64 bits = 3; + */ + public Builder clearBits() { + bits_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter) + } + + static { + defaultInstance = new BloomFilter(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter) + } + + private int bitField0_; + // required bytes db_name = 1; + public static final int DB_NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString dbName_; + /** + * required bytes db_name = 1; + */ + public boolean hasDbName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes db_name = 1; + */ + public com.google.protobuf.ByteString getDbName() { + return dbName_; + } + + // required bytes table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString tableName_; + /** + * required bytes table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes table_name = 2; + */ + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + + // required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + public static final int BLOOM_FILTER_FIELD_NUMBER = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter bloomFilter_; + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public boolean hasBloomFilter() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required 
.org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getBloomFilter() { + return bloomFilter_; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder getBloomFilterOrBuilder() { + return bloomFilter_; + } + + // required int64 aggregated_at = 4; + public static final int AGGREGATED_AT_FIELD_NUMBER = 4; + private long aggregatedAt_; + /** + * required int64 aggregated_at = 4; + */ + public boolean hasAggregatedAt() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required int64 aggregated_at = 4; + */ + public long getAggregatedAt() { + return aggregatedAt_; + } + + private void initFields() { + dbName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = com.google.protobuf.ByteString.EMPTY; + bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); + aggregatedAt_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasDbName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBloomFilter()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasAggregatedAt()) { + memoizedIsInitialized = 0; + return false; + } + if (!getBloomFilter().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, dbName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, bloomFilter_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt64(4, aggregatedAt_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, dbName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, bloomFilter_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, aggregatedAt_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBloomFilterFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + dbName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + tableName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + if (bloomFilterBuilder_ == null) { + bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); + } else { + bloomFilterBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + aggregatedAt_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.dbName_ = dbName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.tableName_ = tableName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (bloomFilterBuilder_ == null) { + result.bloomFilter_ = bloomFilter_; + } else { + result.bloomFilter_ = bloomFilterBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + 
} + result.aggregatedAt_ = aggregatedAt_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.getDefaultInstance()) return this; + if (other.hasDbName()) { + setDbName(other.getDbName()); + } + if (other.hasTableName()) { + setTableName(other.getTableName()); + } + if (other.hasBloomFilter()) { + mergeBloomFilter(other.getBloomFilter()); + } + if (other.hasAggregatedAt()) { + setAggregatedAt(other.getAggregatedAt()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasDbName()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasBloomFilter()) { + + return false; + } + if (!hasAggregatedAt()) { + + return false; + } + if (!getBloomFilter().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes db_name = 1; + private com.google.protobuf.ByteString dbName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes db_name = 1; + */ + public boolean hasDbName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes db_name = 1; + */ + public com.google.protobuf.ByteString getDbName() { + return dbName_; + } + /** + * required bytes db_name = 1; + */ + public Builder setDbName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + dbName_ = value; + onChanged(); + return this; + } + /** + * required bytes db_name = 1; + */ + public Builder clearDbName() { + bitField0_ = (bitField0_ & ~0x00000001); + dbName_ = getDefaultInstance().getDbName(); + onChanged(); + return this; + } + + // required bytes table_name = 2; + private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes table_name = 2; + */ + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + /** + * required bytes table_name = 2; + */ + public Builder setTableName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + tableName_ = value; + onChanged(); + return this; + } + 
/** + * required bytes table_name = 2; + */ + public Builder clearTableName() { + bitField0_ = (bitField0_ & ~0x00000002); + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + + // required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder> bloomFilterBuilder_; + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public boolean hasBloomFilter() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getBloomFilter() { + if (bloomFilterBuilder_ == null) { + return bloomFilter_; + } else { + return bloomFilterBuilder_.getMessage(); + } + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public Builder setBloomFilter(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter value) { + if (bloomFilterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + bloomFilter_ = value; + onChanged(); + } else { + bloomFilterBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public Builder setBloomFilter( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder builderForValue) { + if (bloomFilterBuilder_ == null) { + bloomFilter_ = builderForValue.build(); + onChanged(); + } else { + bloomFilterBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public Builder mergeBloomFilter(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter value) { + if (bloomFilterBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + bloomFilter_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance()) { + bloomFilter_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.newBuilder(bloomFilter_).mergeFrom(value).buildPartial(); + } else { + bloomFilter_ = value; + } + onChanged(); + } else { + bloomFilterBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public Builder clearBloomFilter() { + if (bloomFilterBuilder_ == null) { + bloomFilter_ = 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); + onChanged(); + } else { + bloomFilterBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder getBloomFilterBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getBloomFilterFieldBuilder().getBuilder(); + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder getBloomFilterOrBuilder() { + if (bloomFilterBuilder_ != null) { + return bloomFilterBuilder_.getMessageOrBuilder(); + } else { + return bloomFilter_; + } + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder> + getBloomFilterFieldBuilder() { + if (bloomFilterBuilder_ == null) { + bloomFilterBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder>( + bloomFilter_, + getParentForChildren(), + isClean()); + bloomFilter_ = null; + } + return bloomFilterBuilder_; + } + + // required int64 aggregated_at = 4; + private long aggregatedAt_ ; + /** + * required int64 aggregated_at = 4; + */ + public boolean hasAggregatedAt() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required int64 aggregated_at = 4; + */ + public long getAggregatedAt() { + return aggregatedAt_; + } + /** + * required int64 aggregated_at = 4; + */ + public Builder setAggregatedAt(long value) { + bitField0_ |= 0x00000008; + aggregatedAt_ = value; + onChanged(); + return this; + } + /** + * required int64 aggregated_at = 4; + */ + public Builder clearAggregatedAt() { + bitField0_ = (bitField0_ & ~0x00000008); + aggregatedAt_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter) + } + + static { + defaultInstance = new AggrStatsBloomFilter(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter) + } + + public interface AggrStatsInvalidatorFilterOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + java.util.List + getToInvalidateList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry getToInvalidate(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + int getToInvalidateCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + java.util.List + getToInvalidateOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder getToInvalidateOrBuilder( + int index); + + // required int64 run_every = 2; + /** + * required int64 run_every = 2; + */ + boolean hasRunEvery(); + /** + * required int64 run_every = 2; + */ + long getRunEvery(); + + // required int64 max_cache_entry_life = 3; + /** + * required int64 max_cache_entry_life = 3; + */ + boolean hasMaxCacheEntryLife(); + /** + * required int64 max_cache_entry_life = 3; + */ + long getMaxCacheEntryLife(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter} + */ + public static final class AggrStatsInvalidatorFilter extends + com.google.protobuf.GeneratedMessage + implements AggrStatsInvalidatorFilterOrBuilder { + // Use AggrStatsInvalidatorFilter.newBuilder() to construct. + private AggrStatsInvalidatorFilter(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AggrStatsInvalidatorFilter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AggrStatsInvalidatorFilter defaultInstance; + public static AggrStatsInvalidatorFilter getDefaultInstance() { + return defaultInstance; + } + + public AggrStatsInvalidatorFilter getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AggrStatsInvalidatorFilter( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + toInvalidate_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + toInvalidate_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.PARSER, extensionRegistry)); + break; + } + case 16: { + bitField0_ |= 0x00000001; + runEvery_ = input.readInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000002; + maxCacheEntryLife_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + 
e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + toInvalidate_ = java.util.Collections.unmodifiableList(toInvalidate_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AggrStatsInvalidatorFilter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AggrStatsInvalidatorFilter(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface EntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes db_name = 1; + /** + * required bytes db_name = 1; + */ + boolean hasDbName(); + /** + * required bytes db_name = 1; + */ + com.google.protobuf.ByteString getDbName(); + + // required bytes table_name = 2; + /** + * required bytes table_name = 2; + */ + boolean hasTableName(); + /** + * required bytes table_name = 2; + */ + com.google.protobuf.ByteString getTableName(); + + // required bytes part_name = 3; + /** + * required bytes part_name = 3; + */ + boolean hasPartName(); + /** + * required bytes part_name = 3; + */ + com.google.protobuf.ByteString getPartName(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry} + */ + public static final class Entry extends + com.google.protobuf.GeneratedMessage + implements EntryOrBuilder { + // Use Entry.newBuilder() to construct. 
+ private Entry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Entry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Entry defaultInstance; + public static Entry getDefaultInstance() { + return defaultInstance; + } + + public Entry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Entry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + dbName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + tableName_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + partName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Entry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Entry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bytes db_name = 1; + public static final int DB_NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString dbName_; + /** + * required bytes db_name = 1; + */ + public boolean hasDbName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes db_name = 1; + */ + public com.google.protobuf.ByteString getDbName() { + return dbName_; + } + + // required bytes 
table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString tableName_; + /** + * required bytes table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes table_name = 2; + */ + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + + // required bytes part_name = 3; + public static final int PART_NAME_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString partName_; + /** + * required bytes part_name = 3; + */ + public boolean hasPartName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes part_name = 3; + */ + public com.google.protobuf.ByteString getPartName() { + return partName_; + } + + private void initFields() { + dbName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = com.google.protobuf.ByteString.EMPTY; + partName_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasDbName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPartName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, dbName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, partName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, dbName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, partName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + dbName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + tableName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + partName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.dbName_ = dbName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.tableName_ = tableName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.partName_ = partName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.getDefaultInstance()) return this; + if (other.hasDbName()) { + setDbName(other.getDbName()); + } + if (other.hasTableName()) { + setTableName(other.getTableName()); + } + if (other.hasPartName()) { + 
setPartName(other.getPartName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasDbName()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasPartName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes db_name = 1; + private com.google.protobuf.ByteString dbName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes db_name = 1; + */ + public boolean hasDbName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes db_name = 1; + */ + public com.google.protobuf.ByteString getDbName() { + return dbName_; + } + /** + * required bytes db_name = 1; + */ + public Builder setDbName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + dbName_ = value; + onChanged(); + return this; + } + /** + * required bytes db_name = 1; + */ + public Builder clearDbName() { + bitField0_ = (bitField0_ & ~0x00000001); + dbName_ = getDefaultInstance().getDbName(); + onChanged(); + return this; + } + + // required bytes table_name = 2; + private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes table_name = 2; + */ + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + /** + * required bytes table_name = 2; + */ + public Builder setTableName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + tableName_ = value; + onChanged(); + return this; + } + /** + * required bytes table_name = 2; + */ + public Builder clearTableName() { + bitField0_ = (bitField0_ & ~0x00000002); + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + + // required bytes part_name = 3; + private com.google.protobuf.ByteString partName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes part_name = 3; + */ + public boolean hasPartName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes part_name = 3; + */ + public com.google.protobuf.ByteString getPartName() { + return partName_; + } + /** + * required bytes part_name = 3; + */ + public Builder setPartName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + partName_ = value; + onChanged(); + return this; + } + /** + * required bytes part_name = 3; + */ + public Builder clearPartName() { + bitField0_ = (bitField0_ & ~0x00000004); + partName_ = getDefaultInstance().getPartName(); + onChanged(); + return 
this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry) + } + + static { + defaultInstance = new Entry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry) + } + + private int bitField0_; + // repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + public static final int TO_INVALIDATE_FIELD_NUMBER = 1; + private java.util.List toInvalidate_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public java.util.List getToInvalidateList() { + return toInvalidate_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public java.util.List + getToInvalidateOrBuilderList() { + return toInvalidate_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public int getToInvalidateCount() { + return toInvalidate_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry getToInvalidate(int index) { + return toInvalidate_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder getToInvalidateOrBuilder( + int index) { + return toInvalidate_.get(index); + } + + // required int64 run_every = 2; + public static final int RUN_EVERY_FIELD_NUMBER = 2; + private long runEvery_; + /** + * required int64 run_every = 2; + */ + public boolean hasRunEvery() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int64 run_every = 2; + */ + public long getRunEvery() { + return runEvery_; + } + + // required int64 max_cache_entry_life = 3; + public static final int MAX_CACHE_ENTRY_LIFE_FIELD_NUMBER = 3; + private long maxCacheEntryLife_; + /** + * required int64 max_cache_entry_life = 3; + */ + public boolean hasMaxCacheEntryLife() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int64 max_cache_entry_life = 3; + */ + public long getMaxCacheEntryLife() { + return maxCacheEntryLife_; + } + + private void initFields() { + toInvalidate_ = java.util.Collections.emptyList(); + runEvery_ = 0L; + maxCacheEntryLife_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRunEvery()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasMaxCacheEntryLife()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getToInvalidateCount(); i++) { + if (!getToInvalidate(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < toInvalidate_.size(); i++) { + output.writeMessage(1, toInvalidate_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(2, runEvery_); + } + if (((bitField0_ & 0x00000002) == 
0x00000002)) { + output.writeInt64(3, maxCacheEntryLife_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < toInvalidate_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, toInvalidate_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, runEvery_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, maxCacheEntryLife_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getToInvalidateFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (toInvalidateBuilder_ == null) { + toInvalidate_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + toInvalidateBuilder_.clear(); + } + runEvery_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + maxCacheEntryLife_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (toInvalidateBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + toInvalidate_ = java.util.Collections.unmodifiableList(toInvalidate_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.toInvalidate_ = toInvalidate_; + } else { + result.toInvalidate_ = toInvalidateBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000001; + } + result.runEvery_ = runEvery_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.maxCacheEntryLife_ = maxCacheEntryLife_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.getDefaultInstance()) return this; + if (toInvalidateBuilder_ == null) { + if (!other.toInvalidate_.isEmpty()) { + if (toInvalidate_.isEmpty()) { + toInvalidate_ = other.toInvalidate_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureToInvalidateIsMutable(); + toInvalidate_.addAll(other.toInvalidate_); + } + onChanged(); + } + } else { + if (!other.toInvalidate_.isEmpty()) { + if (toInvalidateBuilder_.isEmpty()) { + toInvalidateBuilder_.dispose(); + toInvalidateBuilder_ = null; + toInvalidate_ = other.toInvalidate_; + bitField0_ = (bitField0_ & ~0x00000001); + toInvalidateBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getToInvalidateFieldBuilder() : null; + } else { + toInvalidateBuilder_.addAllMessages(other.toInvalidate_); + } + } + } + if (other.hasRunEvery()) { + setRunEvery(other.getRunEvery()); + } + if (other.hasMaxCacheEntryLife()) { + setMaxCacheEntryLife(other.getMaxCacheEntryLife()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRunEvery()) { + + return false; + } + if (!hasMaxCacheEntryLife()) { + + return false; + } + for (int i = 0; i < getToInvalidateCount(); i++) { + if (!getToInvalidate(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + private java.util.List toInvalidate_ = + java.util.Collections.emptyList(); + private void ensureToInvalidateIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + toInvalidate_ = new java.util.ArrayList(toInvalidate_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder> toInvalidateBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public java.util.List getToInvalidateList() { + if (toInvalidateBuilder_ == null) { + return java.util.Collections.unmodifiableList(toInvalidate_); + } else { + return toInvalidateBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public int getToInvalidateCount() { + if (toInvalidateBuilder_ == null) { + return toInvalidate_.size(); + } else { + return toInvalidateBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry getToInvalidate(int index) { + if (toInvalidateBuilder_ == null) { + return toInvalidate_.get(index); + } else { + return toInvalidateBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder setToInvalidate( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry value) { + if (toInvalidateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureToInvalidateIsMutable(); + toInvalidate_.set(index, value); + onChanged(); + } 
else { + toInvalidateBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder setToInvalidate( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder builderForValue) { + if (toInvalidateBuilder_ == null) { + ensureToInvalidateIsMutable(); + toInvalidate_.set(index, builderForValue.build()); + onChanged(); + } else { + toInvalidateBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder addToInvalidate(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry value) { + if (toInvalidateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureToInvalidateIsMutable(); + toInvalidate_.add(value); + onChanged(); + } else { + toInvalidateBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder addToInvalidate( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry value) { + if (toInvalidateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureToInvalidateIsMutable(); + toInvalidate_.add(index, value); + onChanged(); + } else { + toInvalidateBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder addToInvalidate( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder builderForValue) { + if (toInvalidateBuilder_ == null) { + ensureToInvalidateIsMutable(); + toInvalidate_.add(builderForValue.build()); + onChanged(); + } else { + toInvalidateBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder addToInvalidate( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder builderForValue) { + if (toInvalidateBuilder_ == null) { + ensureToInvalidateIsMutable(); + toInvalidate_.add(index, builderForValue.build()); + onChanged(); + } else { + toInvalidateBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder addAllToInvalidate( + java.lang.Iterable values) { + if (toInvalidateBuilder_ == null) { + ensureToInvalidateIsMutable(); + super.addAll(values, toInvalidate_); + onChanged(); + } else { + toInvalidateBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder clearToInvalidate() { + if (toInvalidateBuilder_ == null) { + toInvalidate_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + toInvalidateBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder 
removeToInvalidate(int index) { + if (toInvalidateBuilder_ == null) { + ensureToInvalidateIsMutable(); + toInvalidate_.remove(index); + onChanged(); + } else { + toInvalidateBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder getToInvalidateBuilder( + int index) { + return getToInvalidateFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder getToInvalidateOrBuilder( + int index) { + if (toInvalidateBuilder_ == null) { + return toInvalidate_.get(index); } else { + return toInvalidateBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public java.util.List + getToInvalidateOrBuilderList() { + if (toInvalidateBuilder_ != null) { + return toInvalidateBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(toInvalidate_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder addToInvalidateBuilder() { + return getToInvalidateFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder addToInvalidateBuilder( + int index) { + return getToInvalidateFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public java.util.List + getToInvalidateBuilderList() { + return getToInvalidateFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder> + getToInvalidateFieldBuilder() { + if (toInvalidateBuilder_ == null) { + toInvalidateBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder>( + toInvalidate_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + toInvalidate_ = null; + } + return toInvalidateBuilder_; + } + + // required int64 run_every = 2; + private long runEvery_ ; + /** + * required int64 run_every = 2; + */ + public boolean hasRunEvery() { + return ((bitField0_ & 0x00000002) == 
0x00000002); + } + /** + * required int64 run_every = 2; + */ + public long getRunEvery() { + return runEvery_; + } + /** + * required int64 run_every = 2; + */ + public Builder setRunEvery(long value) { + bitField0_ |= 0x00000002; + runEvery_ = value; + onChanged(); + return this; + } + /** + * required int64 run_every = 2; + */ + public Builder clearRunEvery() { + bitField0_ = (bitField0_ & ~0x00000002); + runEvery_ = 0L; + onChanged(); + return this; + } + + // required int64 max_cache_entry_life = 3; + private long maxCacheEntryLife_ ; + /** + * required int64 max_cache_entry_life = 3; + */ + public boolean hasMaxCacheEntryLife() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required int64 max_cache_entry_life = 3; + */ + public long getMaxCacheEntryLife() { + return maxCacheEntryLife_; + } + /** + * required int64 max_cache_entry_life = 3; + */ + public Builder setMaxCacheEntryLife(long value) { + bitField0_ |= 0x00000004; + maxCacheEntryLife_ = value; + onChanged(); + return this; + } + /** + * required int64 max_cache_entry_life = 3; + */ + public Builder clearMaxCacheEntryLife() { + bitField0_ = (bitField0_ & ~0x00000004); + maxCacheEntryLife_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter) + } + + static { + defaultInstance = new AggrStatsInvalidatorFilter(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter) + } + public interface ColumnStatsOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -221,6 +3903,21 @@ private PrincipalType(int index, int value) { * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; */ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder getDecimalStatsOrBuilder(); + + // optional string column_name = 11; + /** + * optional string column_name = 11; + */ + boolean hasColumnName(); + /** + * optional string column_name = 11; + */ + java.lang.String getColumnName(); + /** + * optional string column_name = 11; + */ + com.google.protobuf.ByteString + getColumnNameBytes(); } /** * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats} @@ -371,6 +4068,11 @@ private ColumnStats( bitField0_ |= 0x00000200; break; } + case 90: { + bitField0_ |= 0x00000400; + columnName_ = input.readBytes(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -3761,6 +7463,49 @@ public boolean hasDecimalStats() { return decimalStats_; } + // optional string column_name = 11; + public static final int COLUMN_NAME_FIELD_NUMBER = 11; + private java.lang.Object columnName_; + /** + * optional string column_name = 11; + */ + public boolean hasColumnName() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional string column_name = 11; + */ + public java.lang.String getColumnName() { + java.lang.Object ref = columnName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + columnName_ = s; + } + return s; + } + } + /** + * optional string column_name = 11; + */ + public com.google.protobuf.ByteString + getColumnNameBytes() { + java.lang.Object ref = columnName_; + if (ref instanceof java.lang.String) { + 
com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + columnName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + private void initFields() { lastAnalyzed_ = 0L; columnType_ = ""; @@ -3772,6 +7517,7 @@ private void initFields() { stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); + columnName_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -3825,6 +7571,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (((bitField0_ & 0x00000200) == 0x00000200)) { output.writeMessage(10, decimalStats_); } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeBytes(11, getColumnNameBytes()); + } getUnknownFields().writeTo(output); } @@ -3874,6 +7623,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(10, decimalStats_); } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(11, getColumnNameBytes()); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -4040,6 +7793,8 @@ public Builder clear() { decimalStatsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000200); + columnName_ = ""; + bitField0_ = (bitField0_ & ~0x00000400); return this; } @@ -4132,6 +7887,10 @@ public Builder clone() { } else { result.decimalStats_ = decimalStatsBuilder_.build(); } + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000400; + } + result.columnName_ = columnName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -4180,6 +7939,11 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastorePr if (other.hasDecimalStats()) { mergeDecimalStats(other.getDecimalStats()); } + if (other.hasColumnName()) { + bitField0_ |= 0x00000400; + columnName_ = other.columnName_; + onChanged(); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -5092,6 +8856,80 @@ public Builder clearDecimalStats() { return decimalStatsBuilder_; } + // optional string column_name = 11; + private java.lang.Object columnName_ = ""; + /** + * optional string column_name = 11; + */ + public boolean hasColumnName() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional string column_name = 11; + */ + public java.lang.String getColumnName() { + java.lang.Object ref = columnName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + columnName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string column_name = 11; + */ + public com.google.protobuf.ByteString + getColumnNameBytes() { + java.lang.Object ref = columnName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + columnName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string column_name = 11; + */ + public Builder setColumnName( + java.lang.String value) { + if (value == null) { + throw new 
NullPointerException(); + } + bitField0_ |= 0x00000400; + columnName_ = value; + onChanged(); + return this; + } + /** + * optional string column_name = 11; + */ + public Builder clearColumnName() { + bitField0_ = (bitField0_ & ~0x00000400); + columnName_ = getDefaultInstance().getColumnName(); + onChanged(); + return this; + } + /** + * optional string column_name = 11; + */ + public Builder setColumnNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + columnName_ = value; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats) } @@ -25822,6 +29660,31 @@ public Builder clearIsTemporary() { } private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -25971,142 +29834,188 @@ public Builder clearIsTemporary() { static { java.lang.String[] descriptorData = { "\n\033hbase_metastore_proto.proto\022&org.apach" + - "e.hadoop.hive.metastore.hbase\"\310\010\n\013Column" + - "Stats\022\025\n\rlast_analyzed\030\001 \001(\003\022\023\n\013column_t" + - "ype\030\002 \002(\t\022\021\n\tnum_nulls\030\003 \001(\003\022\033\n\023num_dist" + - "inct_values\030\004 \001(\003\022T\n\nbool_stats\030\005 \001(\0132@." 
+ - "org.apache.hadoop.hive.metastore.hbase.C" + - "olumnStats.BooleanStats\022Q\n\nlong_stats\030\006 " + - "\001(\0132=.org.apache.hadoop.hive.metastore.h" + - "base.ColumnStats.LongStats\022U\n\014double_sta" + - "ts\030\007 \001(\0132?.org.apache.hadoop.hive.metast", - "ore.hbase.ColumnStats.DoubleStats\022U\n\014str" + - "ing_stats\030\010 \001(\0132?.org.apache.hadoop.hive" + - ".metastore.hbase.ColumnStats.StringStats" + - "\022U\n\014binary_stats\030\t \001(\0132?.org.apache.hado" + - "op.hive.metastore.hbase.ColumnStats.Stri" + - "ngStats\022W\n\rdecimal_stats\030\n \001(\0132@.org.apa" + - "che.hadoop.hive.metastore.hbase.ColumnSt" + - "ats.DecimalStats\0325\n\014BooleanStats\022\021\n\tnum_" + - "trues\030\001 \001(\003\022\022\n\nnum_falses\030\002 \001(\003\0322\n\tLongS" + - "tats\022\021\n\tlow_value\030\001 \001(\022\022\022\n\nhigh_value\030\002 ", - "\001(\022\0324\n\013DoubleStats\022\021\n\tlow_value\030\001 \001(\001\022\022\n" + - "\nhigh_value\030\002 \001(\001\032=\n\013StringStats\022\026\n\016max_" + - "col_length\030\001 \001(\003\022\026\n\016avg_col_length\030\002 \001(\001" + - "\032\365\001\n\014DecimalStats\022[\n\tlow_value\030\001 \001(\0132H.o" + - "rg.apache.hadoop.hive.metastore.hbase.Co" + - "lumnStats.DecimalStats.Decimal\022\\\n\nhigh_v" + - "alue\030\002 \001(\0132H.org.apache.hadoop.hive.meta" + - "store.hbase.ColumnStats.DecimalStats.Dec" + - "imal\032*\n\007Decimal\022\020\n\010unscaled\030\001 \002(\014\022\r\n\005sca" + - "le\030\002 \002(\005\"\246\002\n\010Database\022\023\n\013description\030\001 \001", - "(\t\022\013\n\003uri\030\002 \001(\t\022F\n\nparameters\030\003 \001(\01322.or" + - "g.apache.hadoop.hive.metastore.hbase.Par" + - "ameters\022Q\n\nprivileges\030\004 \001(\0132=.org.apache" + - ".hadoop.hive.metastore.hbase.PrincipalPr" + - "ivilegeSet\022\022\n\nowner_name\030\005 \001(\t\022I\n\nowner_" + - "type\030\006 \001(\01625.org.apache.hadoop.hive.meta" + - "store.hbase.PrincipalType\":\n\013FieldSchema" + - "\022\014\n\004name\030\001 \002(\t\022\014\n\004type\030\002 \002(\t\022\017\n\007comment\030" + - "\003 \001(\t\"\206\004\n\010Function\022\022\n\nclass_name\030\001 \001(\t\022\022" + - "\n\nowner_name\030\002 \001(\t\022I\n\nowner_type\030\003 \001(\01625", + "e.hadoop.hive.metastore.hbase\"h\n\tAggrSta" + + "ts\022\023\n\013parts_found\030\001 \002(\003\022F\n\tcol_stats\030\002 \003" + + "(\01323.org.apache.hadoop.hive.metastore.hb" + + "ase.ColumnStats\"\364\001\n\024AggrStatsBloomFilter" + + "\022\017\n\007db_name\030\001 \002(\014\022\022\n\ntable_name\030\002 \002(\014\022^\n" + + "\014bloom_filter\030\003 \002(\0132H.org.apache.hadoop." 
+ + "hive.metastore.hbase.AggrStatsBloomFilte" + + "r.BloomFilter\022\025\n\raggregated_at\030\004 \002(\003\032@\n\013" + + "BloomFilter\022\020\n\010num_bits\030\001 \002(\005\022\021\n\tnum_fun", + "cs\030\002 \002(\005\022\014\n\004bits\030\003 \003(\003\"\357\001\n\032AggrStatsInva" + + "lidatorFilter\022_\n\rto_invalidate\030\001 \003(\0132H.o" + + "rg.apache.hadoop.hive.metastore.hbase.Ag" + + "grStatsInvalidatorFilter.Entry\022\021\n\trun_ev" + + "ery\030\002 \002(\003\022\034\n\024max_cache_entry_life\030\003 \002(\003\032" + + "?\n\005Entry\022\017\n\007db_name\030\001 \002(\014\022\022\n\ntable_name\030" + + "\002 \002(\014\022\021\n\tpart_name\030\003 \002(\014\"\335\010\n\013ColumnStats" + + "\022\025\n\rlast_analyzed\030\001 \001(\003\022\023\n\013column_type\030\002" + + " \002(\t\022\021\n\tnum_nulls\030\003 \001(\003\022\033\n\023num_distinct_" + + "values\030\004 \001(\003\022T\n\nbool_stats\030\005 \001(\0132@.org.a", + "pache.hadoop.hive.metastore.hbase.Column" + + "Stats.BooleanStats\022Q\n\nlong_stats\030\006 \001(\0132=" + ".org.apache.hadoop.hive.metastore.hbase." + - "PrincipalType\022\023\n\013create_time\030\004 \001(\022\022T\n\rfu" + - "nction_type\030\005 \001(\0162=.org.apache.hadoop.hi" + - "ve.metastore.hbase.Function.FunctionType" + - "\022S\n\rresource_uris\030\006 \003(\0132<.org.apache.had" + - "oop.hive.metastore.hbase.Function.Resour" + - "ceUri\032\254\001\n\013ResourceUri\022`\n\rresource_type\030\001" + - " \002(\0162I.org.apache.hadoop.hive.metastore." + - "hbase.Function.ResourceUri.ResourceType\022" + - "\013\n\003uri\030\002 \002(\t\".\n\014ResourceType\022\007\n\003JAR\020\001\022\010\n", - "\004FILE\020\002\022\013\n\007ARCHIVE\020\003\"\030\n\014FunctionType\022\010\n\004" + - "JAVA\020\001\",\n\016ParameterEntry\022\013\n\003key\030\001 \002(\t\022\r\n" + - "\005value\030\002 \002(\t\"W\n\nParameters\022I\n\tparameter\030" + - "\001 \003(\01326.org.apache.hadoop.hive.metastore" + - ".hbase.ParameterEntry\"\360\001\n\tPartition\022\023\n\013c" + - "reate_time\030\001 \001(\003\022\030\n\020last_access_time\030\002 \001" + - "(\003\022\020\n\010location\030\003 \001(\t\022I\n\rsd_parameters\030\004 " + - "\001(\01322.org.apache.hadoop.hive.metastore.h" + - "base.Parameters\022\017\n\007sd_hash\030\005 \002(\014\022F\n\npara" + - "meters\030\006 \001(\01322.org.apache.hadoop.hive.me", - "tastore.hbase.Parameters\"\204\001\n\032PrincipalPr" + - "ivilegeSetEntry\022\026\n\016principal_name\030\001 \002(\t\022" + - "N\n\nprivileges\030\002 \003(\0132:.org.apache.hadoop." 
+ - "hive.metastore.hbase.PrivilegeGrantInfo\"" + - "\275\001\n\025PrincipalPrivilegeSet\022Q\n\005users\030\001 \003(\013" + - "2B.org.apache.hadoop.hive.metastore.hbas" + - "e.PrincipalPrivilegeSetEntry\022Q\n\005roles\030\002 " + - "\003(\0132B.org.apache.hadoop.hive.metastore.h" + - "base.PrincipalPrivilegeSetEntry\"\260\001\n\022Priv" + - "ilegeGrantInfo\022\021\n\tprivilege\030\001 \001(\t\022\023\n\013cre", - "ate_time\030\002 \001(\003\022\017\n\007grantor\030\003 \001(\t\022K\n\014grant" + - "or_type\030\004 \001(\01625.org.apache.hadoop.hive.m" + - "etastore.hbase.PrincipalType\022\024\n\014grant_op" + - "tion\030\005 \001(\010\"\374\001\n\rRoleGrantInfo\022\026\n\016principa" + - "l_name\030\001 \002(\t\022M\n\016principal_type\030\002 \002(\01625.o" + - "rg.apache.hadoop.hive.metastore.hbase.Pr" + - "incipalType\022\020\n\010add_time\030\003 \001(\003\022\017\n\007grantor" + - "\030\004 \001(\t\022K\n\014grantor_type\030\005 \001(\01625.org.apach" + - "e.hadoop.hive.metastore.hbase.PrincipalT" + - "ype\022\024\n\014grant_option\030\006 \001(\010\"^\n\021RoleGrantIn", - "foList\022I\n\ngrant_info\030\001 \003(\01325.org.apache." + - "hadoop.hive.metastore.hbase.RoleGrantInf" + - "o\"\030\n\010RoleList\022\014\n\004role\030\001 \003(\t\"/\n\004Role\022\023\n\013c" + - "reate_time\030\001 \001(\003\022\022\n\nowner_name\030\002 \001(\t\"\254\010\n" + - "\021StorageDescriptor\022A\n\004cols\030\001 \003(\01323.org.a" + - "pache.hadoop.hive.metastore.hbase.FieldS" + - "chema\022\024\n\014input_format\030\002 \001(\t\022\025\n\routput_fo" + - "rmat\030\003 \001(\t\022\025\n\ris_compressed\030\004 \001(\010\022\023\n\013num" + - "_buckets\030\005 \001(\021\022W\n\nserde_info\030\006 \001(\0132C.org" + - ".apache.hadoop.hive.metastore.hbase.Stor", - "ageDescriptor.SerDeInfo\022\023\n\013bucket_cols\030\007" + - " \003(\t\022R\n\tsort_cols\030\010 \003(\0132?.org.apache.had" + - "oop.hive.metastore.hbase.StorageDescript" + - "or.Order\022Y\n\013skewed_info\030\t \001(\0132D.org.apac" + - "he.hadoop.hive.metastore.hbase.StorageDe" + - "scriptor.SkewedInfo\022!\n\031stored_as_sub_dir" + - "ectories\030\n \001(\010\032.\n\005Order\022\023\n\013column_name\030\001" + - " \002(\t\022\020\n\005order\030\002 \001(\021:\0011\032|\n\tSerDeInfo\022\014\n\004n" + - "ame\030\001 \001(\t\022\031\n\021serialization_lib\030\002 \001(\t\022F\n\n" + - "parameters\030\003 \001(\01322.org.apache.hadoop.hiv", - "e.metastore.hbase.Parameters\032\214\003\n\nSkewedI" + - "nfo\022\030\n\020skewed_col_names\030\001 \003(\t\022r\n\021skewed_" + - "col_values\030\002 \003(\0132W.org.apache.hadoop.hiv" + - "e.metastore.hbase.StorageDescriptor.Skew" + - "edInfo.SkewedColValueList\022\206\001\n\036skewed_col" + - "_value_location_maps\030\003 \003(\0132^.org.apache." 
+ - "hadoop.hive.metastore.hbase.StorageDescr" + - "iptor.SkewedInfo.SkewedColValueLocationM" + - "ap\032.\n\022SkewedColValueList\022\030\n\020skewed_col_v" + - "alue\030\001 \003(\t\0327\n\031SkewedColValueLocationMap\022", - "\013\n\003key\030\001 \003(\t\022\r\n\005value\030\002 \002(\t\"\220\004\n\005Table\022\r\n" + - "\005owner\030\001 \001(\t\022\023\n\013create_time\030\002 \001(\003\022\030\n\020las" + - "t_access_time\030\003 \001(\003\022\021\n\tretention\030\004 \001(\003\022\020" + - "\n\010location\030\005 \001(\t\022I\n\rsd_parameters\030\006 \001(\0132" + - "2.org.apache.hadoop.hive.metastore.hbase" + - ".Parameters\022\017\n\007sd_hash\030\007 \002(\014\022K\n\016partitio" + - "n_keys\030\010 \003(\01323.org.apache.hadoop.hive.me" + - "tastore.hbase.FieldSchema\022F\n\nparameters\030" + - "\t \001(\01322.org.apache.hadoop.hive.metastore" + - ".hbase.Parameters\022\032\n\022view_original_text\030", - "\n \001(\t\022\032\n\022view_expanded_text\030\013 \001(\t\022\022\n\ntab" + - "le_type\030\014 \001(\t\022Q\n\nprivileges\030\r \001(\0132=.org." + - "apache.hadoop.hive.metastore.hbase.Princ" + - "ipalPrivilegeSet\022\024\n\014is_temporary\030\016 \001(\010*#" + - "\n\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004ROLE\020\001" + "ColumnStats.LongStats\022U\n\014double_stats\030\007 " + + "\001(\0132?.org.apache.hadoop.hive.metastore.h" + + "base.ColumnStats.DoubleStats\022U\n\014string_s" + + "tats\030\010 \001(\0132?.org.apache.hadoop.hive.meta" + + "store.hbase.ColumnStats.StringStats\022U\n\014b" + + "inary_stats\030\t \001(\0132?.org.apache.hadoop.hi" + + "ve.metastore.hbase.ColumnStats.StringSta", + "ts\022W\n\rdecimal_stats\030\n \001(\0132@.org.apache.h" + + "adoop.hive.metastore.hbase.ColumnStats.D" + + "ecimalStats\022\023\n\013column_name\030\013 \001(\t\0325\n\014Bool" + + "eanStats\022\021\n\tnum_trues\030\001 \001(\003\022\022\n\nnum_false" + + "s\030\002 \001(\003\0322\n\tLongStats\022\021\n\tlow_value\030\001 \001(\022\022" + + "\022\n\nhigh_value\030\002 \001(\022\0324\n\013DoubleStats\022\021\n\tlo" + + "w_value\030\001 \001(\001\022\022\n\nhigh_value\030\002 \001(\001\032=\n\013Str" + + "ingStats\022\026\n\016max_col_length\030\001 \001(\003\022\026\n\016avg_" + + "col_length\030\002 \001(\001\032\365\001\n\014DecimalStats\022[\n\tlow" + + "_value\030\001 \001(\0132H.org.apache.hadoop.hive.me", + "tastore.hbase.ColumnStats.DecimalStats.D" + + "ecimal\022\\\n\nhigh_value\030\002 \001(\0132H.org.apache." + + "hadoop.hive.metastore.hbase.ColumnStats." + + "DecimalStats.Decimal\032*\n\007Decimal\022\020\n\010unsca" + + "led\030\001 \002(\014\022\r\n\005scale\030\002 \002(\005\"\246\002\n\010Database\022\023\n" + + "\013description\030\001 \001(\t\022\013\n\003uri\030\002 \001(\t\022F\n\nparam" + + "eters\030\003 \001(\01322.org.apache.hadoop.hive.met" + + "astore.hbase.Parameters\022Q\n\nprivileges\030\004 " + + "\001(\0132=.org.apache.hadoop.hive.metastore.h" + + "base.PrincipalPrivilegeSet\022\022\n\nowner_name", + "\030\005 \001(\t\022I\n\nowner_type\030\006 \001(\01625.org.apache." + + "hadoop.hive.metastore.hbase.PrincipalTyp" + + "e\":\n\013FieldSchema\022\014\n\004name\030\001 \002(\t\022\014\n\004type\030\002" + + " \002(\t\022\017\n\007comment\030\003 \001(\t\"\206\004\n\010Function\022\022\n\ncl" + + "ass_name\030\001 \001(\t\022\022\n\nowner_name\030\002 \001(\t\022I\n\now" + + "ner_type\030\003 \001(\01625.org.apache.hadoop.hive." 
+ + "metastore.hbase.PrincipalType\022\023\n\013create_" + + "time\030\004 \001(\022\022T\n\rfunction_type\030\005 \001(\0162=.org." + + "apache.hadoop.hive.metastore.hbase.Funct" + + "ion.FunctionType\022S\n\rresource_uris\030\006 \003(\0132", + "<.org.apache.hadoop.hive.metastore.hbase" + + ".Function.ResourceUri\032\254\001\n\013ResourceUri\022`\n" + + "\rresource_type\030\001 \002(\0162I.org.apache.hadoop" + + ".hive.metastore.hbase.Function.ResourceU" + + "ri.ResourceType\022\013\n\003uri\030\002 \002(\t\".\n\014Resource" + + "Type\022\007\n\003JAR\020\001\022\010\n\004FILE\020\002\022\013\n\007ARCHIVE\020\003\"\030\n\014" + + "FunctionType\022\010\n\004JAVA\020\001\",\n\016ParameterEntry" + + "\022\013\n\003key\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"W\n\nParamete" + + "rs\022I\n\tparameter\030\001 \003(\01326.org.apache.hadoo" + + "p.hive.metastore.hbase.ParameterEntry\"\360\001", + "\n\tPartition\022\023\n\013create_time\030\001 \001(\003\022\030\n\020last" + + "_access_time\030\002 \001(\003\022\020\n\010location\030\003 \001(\t\022I\n\r" + + "sd_parameters\030\004 \001(\01322.org.apache.hadoop." + + "hive.metastore.hbase.Parameters\022\017\n\007sd_ha" + + "sh\030\005 \002(\014\022F\n\nparameters\030\006 \001(\01322.org.apach" + + "e.hadoop.hive.metastore.hbase.Parameters" + + "\"\204\001\n\032PrincipalPrivilegeSetEntry\022\026\n\016princ" + + "ipal_name\030\001 \002(\t\022N\n\nprivileges\030\002 \003(\0132:.or" + + "g.apache.hadoop.hive.metastore.hbase.Pri" + + "vilegeGrantInfo\"\275\001\n\025PrincipalPrivilegeSe", + "t\022Q\n\005users\030\001 \003(\0132B.org.apache.hadoop.hiv" + + "e.metastore.hbase.PrincipalPrivilegeSetE" + + "ntry\022Q\n\005roles\030\002 \003(\0132B.org.apache.hadoop." + + "hive.metastore.hbase.PrincipalPrivilegeS" + + "etEntry\"\260\001\n\022PrivilegeGrantInfo\022\021\n\tprivil" + + "ege\030\001 \001(\t\022\023\n\013create_time\030\002 \001(\003\022\017\n\007granto" + + "r\030\003 \001(\t\022K\n\014grantor_type\030\004 \001(\01625.org.apac" + + "he.hadoop.hive.metastore.hbase.Principal" + + "Type\022\024\n\014grant_option\030\005 \001(\010\"\374\001\n\rRoleGrant" + + "Info\022\026\n\016principal_name\030\001 \002(\t\022M\n\016principa", + "l_type\030\002 \002(\01625.org.apache.hadoop.hive.me" + + "tastore.hbase.PrincipalType\022\020\n\010add_time\030" + + "\003 \001(\003\022\017\n\007grantor\030\004 \001(\t\022K\n\014grantor_type\030\005" + + " \001(\01625.org.apache.hadoop.hive.metastore." 
+ + "hbase.PrincipalType\022\024\n\014grant_option\030\006 \001(" + + "\010\"^\n\021RoleGrantInfoList\022I\n\ngrant_info\030\001 \003" + + "(\01325.org.apache.hadoop.hive.metastore.hb" + + "ase.RoleGrantInfo\"\030\n\010RoleList\022\014\n\004role\030\001 " + + "\003(\t\"/\n\004Role\022\023\n\013create_time\030\001 \001(\003\022\022\n\nowne" + + "r_name\030\002 \001(\t\"\254\010\n\021StorageDescriptor\022A\n\004co", + "ls\030\001 \003(\01323.org.apache.hadoop.hive.metast" + + "ore.hbase.FieldSchema\022\024\n\014input_format\030\002 " + + "\001(\t\022\025\n\routput_format\030\003 \001(\t\022\025\n\ris_compres" + + "sed\030\004 \001(\010\022\023\n\013num_buckets\030\005 \001(\021\022W\n\nserde_" + + "info\030\006 \001(\0132C.org.apache.hadoop.hive.meta" + + "store.hbase.StorageDescriptor.SerDeInfo\022" + + "\023\n\013bucket_cols\030\007 \003(\t\022R\n\tsort_cols\030\010 \003(\0132" + + "?.org.apache.hadoop.hive.metastore.hbase" + + ".StorageDescriptor.Order\022Y\n\013skewed_info\030" + + "\t \001(\0132D.org.apache.hadoop.hive.metastore", + ".hbase.StorageDescriptor.SkewedInfo\022!\n\031s" + + "tored_as_sub_directories\030\n \001(\010\032.\n\005Order\022" + + "\023\n\013column_name\030\001 \002(\t\022\020\n\005order\030\002 \001(\021:\0011\032|" + + "\n\tSerDeInfo\022\014\n\004name\030\001 \001(\t\022\031\n\021serializati" + + "on_lib\030\002 \001(\t\022F\n\nparameters\030\003 \001(\01322.org.a" + + "pache.hadoop.hive.metastore.hbase.Parame" + + "ters\032\214\003\n\nSkewedInfo\022\030\n\020skewed_col_names\030" + + "\001 \003(\t\022r\n\021skewed_col_values\030\002 \003(\0132W.org.a" + + "pache.hadoop.hive.metastore.hbase.Storag" + + "eDescriptor.SkewedInfo.SkewedColValueLis", + "t\022\206\001\n\036skewed_col_value_location_maps\030\003 \003" + + "(\0132^.org.apache.hadoop.hive.metastore.hb" + + "ase.StorageDescriptor.SkewedInfo.SkewedC" + + "olValueLocationMap\032.\n\022SkewedColValueList" + + "\022\030\n\020skewed_col_value\030\001 \003(\t\0327\n\031SkewedColV" + + "alueLocationMap\022\013\n\003key\030\001 \003(\t\022\r\n\005value\030\002 " + + "\002(\t\"\220\004\n\005Table\022\r\n\005owner\030\001 \001(\t\022\023\n\013create_t" + + "ime\030\002 \001(\003\022\030\n\020last_access_time\030\003 \001(\003\022\021\n\tr" + + "etention\030\004 \001(\003\022\020\n\010location\030\005 \001(\t\022I\n\rsd_p" + + "arameters\030\006 \001(\01322.org.apache.hadoop.hive", + ".metastore.hbase.Parameters\022\017\n\007sd_hash\030\007" + + " \002(\014\022K\n\016partition_keys\030\010 \003(\01323.org.apach" + + "e.hadoop.hive.metastore.hbase.FieldSchem" + + "a\022F\n\nparameters\030\t \001(\01322.org.apache.hadoo" + + "p.hive.metastore.hbase.Parameters\022\032\n\022vie" + + "w_original_text\030\n \001(\t\022\032\n\022view_expanded_t" + + "ext\030\013 \001(\t\022\022\n\ntable_type\030\014 \001(\t\022Q\n\nprivile" + + "ges\030\r \001(\0132=.org.apache.hadoop.hive.metas" + + "tore.hbase.PrincipalPrivilegeSet\022\024\n\014is_t" + + "emporary\030\016 \001(\010*#\n\rPrincipalType\022\010\n\004USER\020", + "\000\022\010\n\004ROLE\020\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; - internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor = 
getDescriptor().getMessageTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor, + new java.lang.String[] { "PartsFound", "ColStats", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor, + new java.lang.String[] { "DbName", "TableName", "BloomFilter", "AggregatedAt", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor, + new java.lang.String[] { "NumBits", "NumFuncs", "Bits", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor, + new java.lang.String[] { "ToInvalidate", "RunEvery", "MaxCacheEntryLife", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor, + new java.lang.String[] { "DbName", "TableName", "PartName", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor = + getDescriptor().getMessageTypes().get(3); internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor, - new java.lang.String[] { "LastAnalyzed", "ColumnType", "NumNulls", "NumDistinctValues", "BoolStats", "LongStats", "DoubleStats", "StringStats", "BinaryStats", "DecimalStats", }); + new java.lang.String[] { "LastAnalyzed", "ColumnType", "NumNulls", "NumDistinctValues", "BoolStats", "LongStats", "DoubleStats", "StringStats", "BinaryStats", "DecimalStats", "ColumnName", }); internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor = internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(0); internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable = new @@ -26144,19 +30053,19 @@ public Builder clearIsTemporary() { 
internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor, new java.lang.String[] { "Unscaled", "Scale", }); internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor = - getDescriptor().getMessageTypes().get(1); + getDescriptor().getMessageTypes().get(4); internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor, new java.lang.String[] { "Description", "Uri", "Parameters", "Privileges", "OwnerName", "OwnerType", }); internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor = - getDescriptor().getMessageTypes().get(2); + getDescriptor().getMessageTypes().get(5); internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor, new java.lang.String[] { "Name", "Type", "Comment", }); internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(6); internal_static_org_apache_hadoop_hive_metastore_hbase_Function_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor, @@ -26168,67 +30077,67 @@ public Builder clearIsTemporary() { internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor, new java.lang.String[] { "ResourceType", "Uri", }); internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(7); internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor, new java.lang.String[] { "Key", "Value", }); internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(8); internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor, new java.lang.String[] { "Parameter", }); internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(9); internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor, new java.lang.String[] { "CreateTime", "LastAccessTime", "Location", "SdParameters", "SdHash", "Parameters", }); internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(10); internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor, new java.lang.String[] { "PrincipalName", 
"Privileges", }); internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(11); internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor, new java.lang.String[] { "Users", "Roles", }); internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(12); internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor, new java.lang.String[] { "Privilege", "CreateTime", "Grantor", "GrantorType", "GrantOption", }); internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(13); internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor, new java.lang.String[] { "PrincipalName", "PrincipalType", "AddTime", "Grantor", "GrantorType", "GrantOption", }); internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(14); internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor, new java.lang.String[] { "GrantInfo", }); internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(15); internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor, new java.lang.String[] { "Role", }); internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(16); internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor, new java.lang.String[] { "CreateTime", "OwnerName", }); internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(17); internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor, @@ -26264,7 +30173,7 @@ public Builder clearIsTemporary() { internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor, new java.lang.String[] { "Key", "Value", }); 
internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(18); internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor, diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/AggrStatsInvalidatorFilter.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/AggrStatsInvalidatorFilter.java new file mode 100644 index 0000000..b02d6e6 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/AggrStatsInvalidatorFilter.java @@ -0,0 +1,136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FilterBase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +/** + * Filter for scanning aggregates stats table + */ +public class AggrStatsInvalidatorFilter extends FilterBase { + private static final Log LOG = + LogFactory.getLog(AggrStatsInvalidatorFilter.class.getName()); + private final List entries; + private final long runEvery; + private final long maxCacheEntryLife; + + //boolean match; + + public static Filter parseFrom(byte[] serialized) throws DeserializationException { + try { + return new AggrStatsInvalidatorFilter( + HbaseMetastoreProto.AggrStatsInvalidatorFilter.parseFrom(serialized)); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + } + + /** + * @param proto Protocol buffer representation of this filter. + */ + AggrStatsInvalidatorFilter(HbaseMetastoreProto.AggrStatsInvalidatorFilter proto) { + this.entries = proto.getToInvalidateList(); + this.runEvery = proto.getRunEvery(); + this.maxCacheEntryLife = proto.getMaxCacheEntryLife(); + } + + @Override + public byte[] toByteArray() throws IOException { + return HbaseMetastoreProto.AggrStatsInvalidatorFilter.newBuilder() + .addAllToInvalidate(entries) + .setRunEvery(runEvery) + .setMaxCacheEntryLife(maxCacheEntryLife) + .build() + .toByteArray(); + } + + @Override + public boolean filterAllRemaining() throws IOException { + return false; + } + + @Override + public ReturnCode filterKeyValue(Cell cell) throws IOException { + // Is this the partition we want? 
+ if (Arrays.equals(CellUtil.cloneQualifier(cell), HBaseReadWrite.AGGR_STATS_BLOOM_COL)) { + HbaseMetastoreProto.AggrStatsBloomFilter fromCol = + HbaseMetastoreProto.AggrStatsBloomFilter.parseFrom(CellUtil.cloneValue(cell)); + long now = System.currentTimeMillis(); + BloomFilter bloom = null; + if (now - maxCacheEntryLife > fromCol.getAggregatedAt()) { + // It's too old, kill it regardless of whether we were asked to or not. + return ReturnCode.INCLUDE; + } else if (now - runEvery * 2 <= fromCol.getAggregatedAt()) { + // It's too new. We might be stomping on something that was just created. Skip it. + return ReturnCode.NEXT_ROW; + } else { + // Look through each of our entries and see if any of them match. + for (HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry entry : entries) { + // First check if we match on db and table match + if (entry.getDbName().equals(fromCol.getDbName()) && + entry.getTableName().equals(fromCol.getTableName())) { + if (bloom == null) { + // Now, reconstitute the bloom filter and probe it with each of our partition names + bloom = new BloomFilter( + fromCol.getBloomFilter().getBitsList(), + fromCol.getBloomFilter().getNumBits(), + fromCol.getBloomFilter().getNumFuncs()); + } + if (bloom.test(entry.getPartName().toByteArray())) { + // This is most likely a match, so mark it and quit looking. + return ReturnCode.INCLUDE; + } + } + } + } + return ReturnCode.NEXT_ROW; + } else { + return ReturnCode.NEXT_COL; + } + } + + /** + * Extension of {@link org.apache.hive.common.util.BloomFilter} to support serializing it. + */ + static class BloomFilter extends org.apache.hive.common.util.BloomFilter { + BloomFilter(long expectedEntries, double fpp) { + super(expectedEntries, fpp); + } + + BloomFilter(List bits, int numBits, int numFuncs) { + super(); + long[] copied = new long[bits.size()]; + for (int i = 0; i < bits.size(); i++) copied[i] = bits.get(i); + bitSet = new BitSet(copied); + this.numBits = numBits; + numHashFunctions = numFuncs; + } + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/Counter.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/Counter.java index 6171fab..2359939 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/Counter.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/Counter.java @@ -18,6 +18,8 @@ */ package org.apache.hadoop.hive.metastore.hbase; +import com.google.common.annotations.VisibleForTesting; + /** * A simple metric to count how many times something occurs. 
*/ @@ -44,4 +46,8 @@ String dump() { return bldr.toString(); } + @VisibleForTesting long getCnt() { + return cnt; + } + } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java index fd6f9f5..4bbfc99 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java @@ -37,9 +37,7 @@ import org.apache.hadoop.hbase.filter.RegexStringComparator; import org.apache.hadoop.hbase.filter.RowFilter; import org.apache.hadoop.hive.common.ObjectPair; -import org.apache.hive.common.util.BloomFilter; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.AggregateStatsCache; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -53,8 +51,6 @@ import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.hbase.stats.ColumnStatsAggregator; -import org.apache.hadoop.hive.metastore.hbase.stats.ColumnStatsAggregatorFactory; import java.io.IOException; import java.security.MessageDigest; @@ -62,6 +58,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -75,6 +72,7 @@ */ class HBaseReadWrite { + @VisibleForTesting final static String AGGR_STATS_TABLE = "HBMS_AGGR_STATS"; @VisibleForTesting final static String DB_TABLE = "HBMS_DBS"; @VisibleForTesting final static String FUNC_TABLE = "HBMS_FUNCS"; @VisibleForTesting final static String GLOBAL_PRIVS_TABLE = "HBMS_GLOBAL_PRIVS"; @@ -89,12 +87,14 @@ /** * List of tables in HBase */ - final static String[] tableNames = { DB_TABLE, FUNC_TABLE, GLOBAL_PRIVS_TABLE, PART_TABLE, - USER_TO_ROLE_TABLE, ROLE_TABLE, SD_TABLE, TABLE_TABLE }; + final static String[] tableNames = { AGGR_STATS_TABLE, DB_TABLE, FUNC_TABLE, GLOBAL_PRIVS_TABLE, + PART_TABLE, USER_TO_ROLE_TABLE, ROLE_TABLE, SD_TABLE, + TABLE_TABLE }; final static Map> columnFamilies = new HashMap> (tableNames.length); static { + columnFamilies.put(AGGR_STATS_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(DB_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(FUNC_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(GLOBAL_PRIVS_TABLE, Arrays.asList(CATALOG_CF)); @@ -105,11 +105,21 @@ columnFamilies.put(TABLE_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); } - private final static byte[] CATALOG_COL = "cat".getBytes(HBaseUtils.ENCODING); + /** + * Stores the bloom filter for the aggregated stats, to determine what partitions are in this + * aggregate. 
+ */ + final static byte[] AGGR_STATS_BLOOM_COL = "b".getBytes(HBaseUtils.ENCODING); + private final static byte[] CATALOG_COL = "c".getBytes(HBaseUtils.ENCODING); private final static byte[] ROLES_COL = "roles".getBytes(HBaseUtils.ENCODING); private final static byte[] REF_COUNT_COL = "ref".getBytes(HBaseUtils.ENCODING); - private final static byte[] GLOBAL_PRIVS_KEY = "globalprivs".getBytes(HBaseUtils.ENCODING); + private final static byte[] AGGR_STATS_STATS_COL = "s".getBytes(HBaseUtils.ENCODING); + private final static byte[] GLOBAL_PRIVS_KEY = "gp".getBytes(HBaseUtils.ENCODING); private final static int TABLES_TO_CACHE = 10; + // False positives are very bad here because they cause us to invalidate entries we shouldn't. + // Space used and # of hash functions grows in proportion to ln of num bits so a 10x increase + // in accuracy doubles the required space and number of hash functions. + private final static double STATS_BF_ERROR_RATE = 0.001; @VisibleForTesting final static String TEST_CONN = "test_connection"; private static HBaseConnection testConn; @@ -135,7 +145,7 @@ protected HBaseReadWrite initialValue() { private ObjectCache, Table> tableCache; private ObjectCache sdCache; private PartitionCache partCache; - private AggregateStatsCache aggrStatsCache; + private StatsCache statsCache; private Counter tableHits; private Counter tableMisses; private Counter tableOverflows; @@ -239,8 +249,8 @@ private HBaseReadWrite(Configuration configuration) { sdCache = new ObjectCache(sdsCacheSize, sdHits, sdMisses, sdOverflows); partCache = new PartitionCache(totalCatalogObjectsToCache, partHits, partMisses, partOverflows); - aggrStatsCache = AggregateStatsCache.getInstance(conf); } + statsCache = StatsCache.getInstance(conf); roleCache = new HashMap(); entireRoleTableInCache = false; } @@ -252,14 +262,6 @@ static synchronized void createTablesIfNotExist() throws IOException { if (self.get().conn.getHBaseTable(name, true) == null) { List families = columnFamilies.get(name); self.get().conn.createHBaseTable(name, families); - /* - List columnFamilies = new ArrayList(); - columnFamilies.add(CATALOG_CF); - if (TABLE_TABLE.equals(name) || PART_TABLE.equals(name)) { - columnFamilies.add(STATS_CF); - } - self.get().conn.createHBaseTable(name, columnFamilies); - */ } } tablesCreated = true; @@ -1465,13 +1467,12 @@ public int hashCode() { * * @param dbName database the table is in * @param tableName table to update statistics for - * @param partName name of the partition, can be null if these are table level statistics. * @param partVals partition values that define partition to update statistics for. If this is * null, then these will be assumed to be table level statistics * @param stats Stats object with stats for one or more columns * @throws IOException */ - void updateStatistics(String dbName, String tableName, String partName, List partVals, + void updateStatistics(String dbName, String tableName, List partVals, ColumnStatistics stats) throws IOException { byte[] key = getStatisticsKey(dbName, tableName, partVals); String hbaseTable = getStatisticsTable(partVals); @@ -1534,171 +1535,155 @@ ColumnStatistics getTableStatistics(String dbName, String tblName, List * to translate from partName to partVals * @param colNames column names to fetch stats for. These columns will be fetched for all * requested partitions - * @return list of ColumnStats, one for each partition. 
The values will be in the same order as - * the partNames list that was passed in + * @return list of ColumnStats, one for each partition for which we found at least one column's + * stats. * @throws IOException */ List getPartitionStatistics(String dbName, String tblName, List partNames, List> partVals, List colNames) throws IOException { - List statsList = new ArrayList(partNames.size()); - ColumnStatistics partitionStats; - ColumnStatisticsDesc statsDesc; - byte[][] colKeys = new byte[colNames.size()][]; - List gets = new ArrayList(); - // Initialize the list and build the Gets - for (int pOff = 0; pOff < partNames.size(); pOff++) { - // Add an entry for this partition in the stats list - partitionStats = new ColumnStatistics(); - statsDesc = new ColumnStatisticsDesc(); - statsDesc.setIsTblLevel(false); - statsDesc.setDbName(dbName); - statsDesc.setTableName(tblName); - statsDesc.setPartName(partNames.get(pOff)); - partitionStats.setStatsDesc(statsDesc); - statsList.add(partitionStats); - // Build the list of Gets - for (int i = 0; i < colKeys.length; i++) { - colKeys[i] = HBaseUtils.buildKey(colNames.get(i)); - } - byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName, partVals.get(pOff)); + List statsList = new ArrayList<>(partNames.size()); + Map, String> valToPartMap = new HashMap<>(partNames.size()); + List gets = new ArrayList<>(partNames.size() * colNames.size()); + assert partNames.size() == partVals.size(); + + byte[][] colNameBytes = new byte[colNames.size()][]; + for (int i = 0; i < colNames.size(); i++) { + colNameBytes[i] = HBaseUtils.buildKey(colNames.get(i)); + } + + for (int i = 0; i < partNames.size(); i++) { + valToPartMap.put(partVals.get(i), partNames.get(i)); + byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName, partVals.get(i)); Get get = new Get(partKey); - for (byte[] colName : colKeys) { + for (byte[] colName : colNameBytes) { get.addColumn(STATS_CF, colName); } gets.add(get); } HTableInterface htab = conn.getHBaseTable(PART_TABLE); - // Get results from HBase Result[] results = htab.get(gets); - // Deserialize the stats objects and add to stats list - for (int pOff = 0; pOff < results.length; pOff++) { - for (int cOff = 0; cOff < colNames.size(); cOff++) { - byte[] serializedColStats = results[pOff].getValue(STATS_CF, colKeys[cOff]); - if (serializedColStats == null) { - // There were no stats for this column, so skip it - continue; + for (int i = 0; i < results.length; i++) { + ColumnStatistics colStats = null; + for (int j = 0; j < colNameBytes.length; j++) { + byte[] serializedColStats = results[i].getValue(STATS_CF, colNameBytes[j]); + if (serializedColStats != null) { + if (colStats == null) { + // We initialize this late so that we don't create extras in the case of + // partitions with no stats + colStats = new ColumnStatistics(); + statsList.add(colStats); + ColumnStatisticsDesc csd = new ColumnStatisticsDesc(); + + // We need to figure out which partition these call stats are from. To do that we + // recontruct the key. We have to pull the dbName and tableName out of the key to + // find the partition values. 
+ byte[] key = results[i].getRow(); + String[] reconstructedKey = HBaseUtils.parseKey(key); + List reconstructedPartVals = + Arrays.asList(reconstructedKey).subList(2, reconstructedKey.length); + String partName = valToPartMap.get(reconstructedPartVals); + assert partName != null; + csd.setIsTblLevel(false); + csd.setDbName(dbName); + csd.setTableName(tblName); + csd.setPartName(partName); + colStats.setStatsDesc(csd); + } + ColumnStatisticsObj cso = + HBaseUtils.deserializeStatsForOneColumn(colStats, serializedColStats); + cso.setColName(colNames.get(j)); + colStats.addToStatsObj(cso); } - partitionStats = statsList.get(pOff); - ColumnStatisticsObj colStats = - HBaseUtils.deserializeStatsForOneColumn(partitionStats, serializedColStats); - colStats.setColName(colNames.get(cOff)); - partitionStats.addToStatsObj(colStats); } } + return statsList; } /** - * Get aggregate stats for a column from the DB and populate the bloom filter if it's not null - * @param dbName - * @param tblName - * @param partNames - * @param partVals - * @param colNames - * @return + * Get a reference to the stats cache. + * @return the stats cache. + */ + StatsCache getStatsCache() { + return statsCache; + } + + /** + * Get aggregated stats. Only intended for use by + * {@link org.apache.hadoop.hive.metastore.hbase.StatsCache}. Others should not call directly + * but should call StatsCache.get instead. + * @param key The md5 hash associated with this partition set + * @return stats if hbase has them, else null * @throws IOException */ - AggrStats getAggrStats(String dbName, String tblName, List partNames, - List> partVals, List colNames) throws IOException { - // One ColumnStatisticsObj per column - List colStatsList = new ArrayList(); - AggregateStatsCache.AggrColStats colStatsAggrCached; - ColumnStatisticsObj colStatsAggr; - int maxPartitionsPerCacheNode = aggrStatsCache.getMaxPartsPerCacheNode(); - float falsePositiveProbability = aggrStatsCache.getFalsePositiveProbability(); - int partitionsRequested = partNames.size(); - // TODO: Steal extrapolation logic from current MetaStoreDirectSql code - // Right now doing nothing and keeping partitionsFound == partitionsRequested - int partitionsFound = partitionsRequested; - for (String colName : colNames) { - if (partitionsRequested > maxPartitionsPerCacheNode) { - // Read from HBase but don't add to cache since it doesn't qualify the criteria - colStatsAggr = getAggrStatsFromDB(dbName, tblName, colName, partNames, partVals, null); - colStatsList.add(colStatsAggr); - } else { - // Check the cache first - colStatsAggrCached = aggrStatsCache.get(dbName, tblName, colName, partNames); - if (colStatsAggrCached != null) { - colStatsList.add(colStatsAggrCached.getColStats()); - } else { - // Bloom filter for the new node that we will eventually add to the cache - BloomFilter bloomFilter = - new BloomFilter(maxPartitionsPerCacheNode, falsePositiveProbability); - colStatsAggr = - getAggrStatsFromDB(dbName, tblName, colName, partNames, partVals, bloomFilter); - colStatsList.add(colStatsAggr); - // Update the cache to add this new aggregate node - aggrStatsCache.add(dbName, tblName, colName, partitionsFound, colStatsAggr, bloomFilter); - } - } - } - return new AggrStats(colStatsList, partitionsFound); + AggrStats getAggregatedStats(byte[] key) throws IOException{ + byte[] serialized = read(AGGR_STATS_TABLE, key, CATALOG_CF, AGGR_STATS_STATS_COL); + if (serialized == null) return null; + return HBaseUtils.deserializeAggrStats(serialized); } /** - * - * @param dbName - * @param 
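
As an aside on the bloom filter round trip that putAggregatedStats and AggrStatsInvalidatorFilter rely on, the following standalone sketch (not part of the patch) uses the public org.apache.hive.common.util.BloomFilter base class that AggrStatsInvalidatorFilter.BloomFilter extends. The class name and partition names are placeholders, and StandardCharsets.UTF_8 stands in for HBaseUtils.ENCODING. On the write path one entry is added per aggregated partition name; on the invalidation path the filter probes the same bits with the name of a dropped or updated partition, where a hit means the aggregate might cover that partition and should be invalidated, and a false positive only costs an unnecessary re-aggregation.

import org.apache.hive.common.util.BloomFilter;

import java.nio.charset.StandardCharsets;

public class AggrStatsBloomSketch {
  public static void main(String[] args) {
    // Write path: one bloom entry per partition name in the aggregate,
    // built with a 0.1% false positive rate as in STATS_BF_ERROR_RATE.
    BloomFilter bloom = new BloomFilter(2, 0.001);
    bloom.add("ds=today".getBytes(StandardCharsets.UTF_8));
    bloom.add("ds=yesterday".getBytes(StandardCharsets.UTF_8));

    // Invalidation path: a partition that is part of the aggregate always tests true.
    System.out.println(bloom.test("ds=today".getBytes(StandardCharsets.UTF_8)));     // true
    // An unrelated partition tests false, except for rare (~0.1%) false positives,
    // which merely invalidate an aggregate that could have been kept.
    System.out.println(bloom.test("ds=tomorrow".getBytes(StandardCharsets.UTF_8)));  // almost always false
  }
}
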
tblName - * @param partNames - * @param partVals - * @param colName - * @param bloomFilter - * @return + * Put aggregated stats Only intended for use by + * {@link org.apache.hadoop.hive.metastore.hbase.StatsCache}. Others should not call directly + * but should call StatsCache.put instead. + * @param key The md5 hash associated with this partition set + * @param dbName Database these partitions are in + * @param tableName Table these partitions are in + * @param partNames Partition names + * @param colName Column stats are for + * @param stats Stats + * @throws IOException + */ + void putAggregatedStats(byte[] key, String dbName, String tableName, List partNames, + String colName, AggrStats stats) throws IOException { + // Serialize the part names + List protoNames = new ArrayList<>(partNames.size() + 3); + protoNames.add(dbName); + protoNames.add(tableName); + protoNames.add(colName); + protoNames.addAll(partNames); + // Build a bloom Filter for these partitions + AggrStatsInvalidatorFilter.BloomFilter bloom = new AggrStatsInvalidatorFilter.BloomFilter + (partNames.size(), STATS_BF_ERROR_RATE); + for (String partName : partNames) { + bloom.add(partName.getBytes(HBaseUtils.ENCODING)); + } + byte[] serializedFilter = HBaseUtils.serializeBloomFilter(dbName, tableName, bloom); + + byte[] serializedStats = HBaseUtils.serializeAggrStats(stats); + store(AGGR_STATS_TABLE, key, CATALOG_CF, + new byte[][]{AGGR_STATS_BLOOM_COL, AGGR_STATS_STATS_COL}, + new byte[][]{serializedFilter, serializedStats}); + } + + // TODO - We shouldn't remove an entry from the cache as soon as a single partition is deleted. + // TODO - Instead we should keep track of how many partitions have been deleted and only remove + // TODO - an entry once it passes a certain threshold, like 5%, of partitions have been removed. + // TODO - That requires moving this from a filter to a co-processor. + /** + * Invalidate stats associated with the listed partitions. This method is intended for use + * only by {@link org.apache.hadoop.hive.metastore.hbase.StatsCache}. + * @param filter serialized version of the filter to pass + * @return List of md5 hash keys for the partition stat sets that were removed. 
+ * @throws IOException */ - private ColumnStatisticsObj getAggrStatsFromDB(String dbName, String tblName, String colName, - List partNames, List> partVals, BloomFilter bloomFilter) + List + invalidateAggregatedStats(HbaseMetastoreProto.AggrStatsInvalidatorFilter filter) throws IOException { - ColumnStatisticsObj colStatsAggr = new ColumnStatisticsObj(); - boolean colStatsAggrInited = false; - ColumnStatsAggregator colStatsAggregator = null; - List gets = new ArrayList(); - byte[] colKey = HBaseUtils.buildKey(colName); - // Build a list of Gets, one per partition - for (int pOff = 0; pOff < partNames.size(); pOff++) { - byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName, partVals.get(pOff)); - Get get = new Get(partKey); - get.addColumn(STATS_CF, colKey); - gets.add(get); + Iterator results = scan(AGGR_STATS_TABLE, new AggrStatsInvalidatorFilter(filter)); + if (!results.hasNext()) return Collections.emptyList(); + List deletes = new ArrayList<>(); + List keys = new ArrayList<>(); + while (results.hasNext()) { + Result result = results.next(); + deletes.add(new Delete(result.getRow())); + keys.add(new StatsCache.StatsCacheKey(result.getRow())); } - HTableInterface htab = conn.getHBaseTable(PART_TABLE); - // Get results from HBase - Result[] results = htab.get(gets); - // Iterate through the results - // The results size and order is the same as the number and order of the Gets - // If the column is not present in a partition, the Result object will be empty - for (int pOff = 0; pOff < partNames.size(); pOff++) { - if (results[pOff].isEmpty()) { - // There were no stats for this column, so skip it - continue; - } - byte[] serializedColStats = results[pOff].getValue(STATS_CF, colKey); - if (serializedColStats == null) { - // There were no stats for this column, so skip it - continue; - } - ColumnStatisticsObj colStats = - HBaseUtils.deserializeStatsForOneColumn(null, serializedColStats); - if (!colStatsAggrInited) { - // This is the 1st column stats object we got - colStatsAggr.setColName(colName); - colStatsAggr.setColType(colStats.getColType()); - colStatsAggr.setStatsData(colStats.getStatsData()); - colStatsAggregator = - ColumnStatsAggregatorFactory.getColumnStatsAggregator(colStats.getStatsData() - .getSetField()); - colStatsAggrInited = true; - } else { - // Perform aggregation with whatever we've already aggregated - colStatsAggregator.aggregate(colStatsAggr, colStats); - } - // Add partition to the bloom filter if it's requested - if (bloomFilter != null) { - bloomFilter.add(partNames.get(pOff).getBytes()); - } - } - return colStatsAggr; + HTableInterface htab = conn.getHBaseTable(AGGR_STATS_TABLE); + htab.delete(deletes); + return keys; } private byte[] getStatisticsKey(String dbName, String tableName, List partVals) { @@ -1718,9 +1703,12 @@ private String getStatisticsTable(List partVals) { * This should be called whenever a new query is started. 
*/ void flushCatalogCache() { - for (Counter counter : counters) { - LOG.debug(counter.dump()); - counter.clear(); + if (LOG.isDebugEnabled()) { + for (Counter counter : counters) { + LOG.debug(counter.dump()); + counter.clear(); + } + statsCache.dumpCounters(); } tableCache.flush(); sdCache.flush(); @@ -1794,6 +1782,10 @@ private void delete(String table, byte[] key, byte[] colFam, byte[] colName) thr return scan(table, null, null, colFam, colName, filter); } + private Iterator scan(String table, Filter filter) throws IOException { + return scan(table, null, null, null, null, filter); + } + private Iterator scan(String table, byte[] keyStart, byte[] keyEnd, byte[] colFam, byte[] colName, Filter filter) throws IOException { HTableInterface htab = conn.getHBaseTable(table); @@ -1804,7 +1796,9 @@ private void delete(String table, byte[] key, byte[] colFam, byte[] colName) thr if (keyEnd != null) { s.setStopRow(keyEnd); } - s.addColumn(colFam, colName); + if (colFam != null && colName != null) { + s.addColumn(colFam, colName); + } if (filter != null) { s.setFilter(filter); } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java index 4fa2ae5..c3ffb1b 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java @@ -18,6 +18,8 @@ */ package org.apache.hadoop.hive.metastore.hbase; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.cache.CacheLoader; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -370,6 +372,9 @@ public boolean dropPartition(String dbName, String tableName, List part_ openTransaction(); try { getHBase().deletePartition(dbName, tableName, part_vals); + // Drop any cached stats that reference this partitions + getHBase().getStatsCache().invalidate(dbName, tableName, + buildExternalPartName(dbName, tableName, part_vals)); commit = true; return true; } catch (IOException e) { @@ -1472,7 +1477,7 @@ public boolean updateTableColumnStatistics(ColumnStatistics colStats) throws openTransaction(); try { getHBase().updateStatistics(colStats.getStatsDesc().getDbName(), - colStats.getStatsDesc().getTableName(), null, null, colStats); + colStats.getStatsDesc().getTableName(), null, colStats); commit = true; return true; } catch (IOException e) { @@ -1491,8 +1496,10 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, openTransaction(); try { getHBase().updateStatistics(statsObj.getStatsDesc().getDbName(), - statsObj.getStatsDesc().getTableName(), statsObj.getStatsDesc().getPartName(), - partVals, statsObj); + statsObj.getStatsDesc().getTableName(), partVals, statsObj); + // We need to invalidate aggregates that include this partition + getHBase().getStatsCache().invalidate(statsObj.getStatsDesc().getDbName(), + statsObj.getStatsDesc().getTableName(), statsObj.getStatsDesc().getPartName()); commit = true; return true; } catch (IOException e) { @@ -1528,7 +1535,6 @@ public ColumnStatistics getTableColumnStatistics(String dbName, String tableName for (String partName : partNames) { partVals.add(partNameToVals(partName)); } - for (String partName : partNames) partVals.add(partNameToVals(partName)); boolean commit = false; openTransaction(); try { @@ -1574,9 +1580,24 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, List boolean commit = 
false; openTransaction(); try { - AggrStats stats = getHBase().getAggrStats(dbName, tblName, partNames, partVals, colNames); + AggrStats aggrStats = new AggrStats(); + for (String colName : colNames) { + try { + AggrStats oneCol = + getHBase().getStatsCache().get(dbName, tblName, partNames, colName); + if (oneCol.getColStatsSize() > 0) { + assert oneCol.getColStatsSize() == 1; + aggrStats.setPartsFound(aggrStats.getPartsFound() + oneCol.getPartsFound()); + aggrStats.addToColStats(oneCol.getColStats().get(0)); + } + } catch (CacheLoader.InvalidCacheLoadException e) { + LOG.debug("Found no stats for column " + colName); + // This means we have no stats at all for this column for these partitions, so just + // move on. + } + } commit = true; - return stats; + return aggrStats; } catch (IOException e) { LOG.error("Unable to fetch aggregate column statistics", e); throw new MetaException("Failed fetching aggregate column statistics, " + e.getMessage()); @@ -2068,7 +2089,7 @@ static String buildExternalPartName(Table table, List partVals) { return FileUtils.makePartName(partCols, partVals); } - private List partNameToVals(String name) { + private static List partNameToVals(String name) { if (name == null) return null; List vals = new ArrayList(); String[] kvp = name.split("/"); @@ -2078,6 +2099,14 @@ static String buildExternalPartName(Table table, List partVals) { return vals; } + static List> partNameListToValList(List partNames) { + List> valLists = new ArrayList>(partNames.size()); + for (String partName : partNames) { + valLists.add(partNameToVals(partName)); + } + return valLists; + } + private String likeToRegex(String like) { if (like == null) return null; // Convert Hive's strange like syntax to Java regex. Per @@ -2097,4 +2126,8 @@ private void commitOrRoleBack(boolean commit) { rollbackTransaction(); } } + + @VisibleForTesting HBaseReadWrite backdoor() { + return getHBase(); + } } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java index 969c979..d82ec07 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java @@ -23,11 +23,11 @@ import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Decimal; @@ -50,7 +50,6 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.thrift.TFieldIdEnum; import java.io.IOException; import java.nio.charset.Charset; @@ -98,6 +97,11 @@ return protoKey.getBytes(ENCODING); } + static String[] parseKey(byte[] serialized) { + String munged = new String(serialized, ENCODING); + return munged.split(KEY_SEPARATOR_STR); + } + private static HbaseMetastoreProto.Parameters buildParameters(Map 
params) { List entries = new ArrayList(); @@ -902,14 +906,41 @@ static StorageDescriptorParts deserializeTable(String dbName, String tableName, return sdParts; } - static byte[] serializeStatsForOneColumn(ColumnStatistics partitionColumnStats, ColumnStatisticsObj colStats) + static byte[] serializeBloomFilter(String dbName, String tableName, + AggrStatsInvalidatorFilter.BloomFilter bloom) { + long[] bitSet = bloom.getBitSet(); + List bits = new ArrayList<>(bitSet.length); + for (int i = 0; i < bitSet.length; i++) bits.add(bitSet[i]); + HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter protoBloom = + HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.newBuilder() + .setNumBits(bloom.getBitSize()) + .setNumFuncs(bloom.getNumHashFunctions()) + .addAllBits(bits) + .build(); + + HbaseMetastoreProto.AggrStatsBloomFilter proto = + HbaseMetastoreProto.AggrStatsBloomFilter.newBuilder() + .setDbName(ByteString.copyFrom(dbName.getBytes(ENCODING))) + .setTableName(ByteString.copyFrom(tableName.getBytes(ENCODING))) + .setBloomFilter(protoBloom) + .setAggregatedAt(System.currentTimeMillis()) + .build(); + + return proto.toByteArray(); + } + + private static HbaseMetastoreProto.ColumnStats + protoBufStatsForOneColumn(ColumnStatistics partitionColumnStats, ColumnStatisticsObj colStats) throws IOException { HbaseMetastoreProto.ColumnStats.Builder builder = HbaseMetastoreProto.ColumnStats.newBuilder(); - builder.setLastAnalyzed(partitionColumnStats.getStatsDesc().getLastAnalyzed()); - if (colStats.getColType() == null) { - throw new RuntimeException("Column type must be set"); + if (partitionColumnStats != null) { + builder.setLastAnalyzed(partitionColumnStats.getStatsDesc().getLastAnalyzed()); } + assert colStats.getColType() != null; builder.setColumnType(colStats.getColType()); + assert colStats.getColName() != null; + builder.setColumnName(colStats.getColName()); + ColumnStatisticsData colData = colStats.getStatsData(); switch (colData.getSetField()) { case BOOLEAN_STATS: @@ -987,12 +1018,23 @@ static StorageDescriptorParts deserializeTable(String dbName, String tableName, default: throw new RuntimeException("Woh, bad. 
Unknown stats type!"); } - return builder.build().toByteArray(); + return builder.build(); + } + + static byte[] serializeStatsForOneColumn(ColumnStatistics partitionColumnStats, + ColumnStatisticsObj colStats) throws IOException { + return protoBufStatsForOneColumn(partitionColumnStats, colStats).toByteArray(); } static ColumnStatisticsObj deserializeStatsForOneColumn(ColumnStatistics partitionColumnStats, byte[] bytes) throws IOException { HbaseMetastoreProto.ColumnStats proto = HbaseMetastoreProto.ColumnStats.parseFrom(bytes); + return statsForOneColumnFromProtoBuf(partitionColumnStats, proto); + } + + private static ColumnStatisticsObj + statsForOneColumnFromProtoBuf(ColumnStatistics partitionColumnStats, + HbaseMetastoreProto.ColumnStats proto) throws IOException { ColumnStatisticsObj colStats = new ColumnStatisticsObj(); long lastAnalyzed = proto.getLastAnalyzed(); if (partitionColumnStats != null) { @@ -1000,6 +1042,7 @@ static ColumnStatisticsObj deserializeStatsForOneColumn(ColumnStatistics partiti Math.max(lastAnalyzed, partitionColumnStats.getStatsDesc().getLastAnalyzed())); } colStats.setColType(proto.getColumnType()); + colStats.setColName(proto.getColumnName()); ColumnStatisticsData colData = new ColumnStatisticsData(); if (proto.hasBoolStats()) { @@ -1067,6 +1110,30 @@ static ColumnStatisticsObj deserializeStatsForOneColumn(ColumnStatistics partiti return colStats; } + static byte[] serializeAggrStats(AggrStats aggrStats) throws IOException { + List protoColStats = + new ArrayList<>(aggrStats.getColStatsSize()); + for (ColumnStatisticsObj cso : aggrStats.getColStats()) { + protoColStats.add(protoBufStatsForOneColumn(null, cso)); + } + return HbaseMetastoreProto.AggrStats.newBuilder() + .setPartsFound(aggrStats.getPartsFound()) + .addAllColStats(protoColStats) + .build() + .toByteArray(); + } + + static AggrStats deserializeAggrStats(byte[] serialized) throws IOException { + HbaseMetastoreProto.AggrStats protoAggrStats = + HbaseMetastoreProto.AggrStats.parseFrom(serialized); + AggrStats aggrStats = new AggrStats(); + aggrStats.setPartsFound(protoAggrStats.getPartsFound()); + for (HbaseMetastoreProto.ColumnStats protoCS : protoAggrStats.getColStatsList()) { + aggrStats.addToColStats(statsForOneColumnFromProtoBuf(null, protoCS)); + } + return aggrStats; + } + /** * @param keyStart byte array representing the start prefix * @return byte array corresponding to the next possible prefix diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java new file mode 100644 index 0000000..610f13b --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java @@ -0,0 +1,325 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.protobuf.ByteString; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.hbase.stats.ColumnStatsAggregator; +import org.apache.hadoop.hive.metastore.hbase.stats.ColumnStatsAggregatorFactory; + +import java.io.IOException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +/** + * A cache for stats. This is only intended for use by + * {@link org.apache.hadoop.hive.metastore.hbase.HBaseReadWrite} and should not be used outside + * that class. + */ +class StatsCache { + + // How often to run the invalidator, in milliseconds. + private static final long DEFAULT_RUN_INVALIDATOR_EVERY = 5000; + // Maximum amount of time in milliseconds an entry can live in the HBase cache. This is to avoid + // cruft building up over the centuries. 
+ private static final long DEFAULT_MAX_TIME_IN_CACHE = 1000 * 84600 * 7; + private static final Log LOG = LogFactory.getLog(StatsCache.class.getName()); + private static StatsCache self = null; + + private LoadingCache cache; + private MessageDigest md; + private Invalidator invalidator; + private long runInvalidatorEvery = DEFAULT_RUN_INVALIDATOR_EVERY; + private long maxTimeInCache = DEFAULT_MAX_TIME_IN_CACHE; + private boolean invalidatorHasRun; + + @VisibleForTesting Counter misses; + @VisibleForTesting Counter hbaseHits; + @VisibleForTesting Counter totalGets; + + static synchronized StatsCache getInstance(Configuration conf) { + if (self == null) { + self = new StatsCache(conf); + } + return self; + } + + private StatsCache(Configuration conf) { + final StatsCache me = this; + cache = CacheBuilder.newBuilder() + .maximumSize( + HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_HBASE_STATS_CACHE_OBJECTS)) + .expireAfterWrite(HiveConf.getTimeVar(conf, + HiveConf.ConfVars.METASTORE_HBASE_STATS_CACHE_TTL, TimeUnit.SECONDS), TimeUnit.SECONDS) + .build(new CacheLoader() { + @Override + public AggrStats load(StatsCacheKey key) throws Exception { + HBaseReadWrite hrw = HBaseReadWrite.getInstance(); + AggrStats aggrStats = hrw.getAggregatedStats(key.md5); + if (aggrStats == null) { + misses.incr(); + ColumnStatsAggregator aggregator = null; + ColumnStatisticsObj statsObj = null; + aggrStats = new AggrStats(); + LOG.debug("Unable to find aggregated stats for " + key.colName + ", aggregating"); + List css = hrw.getPartitionStatistics(key.dbName, key.tableName, + key.partNames, HBaseStore.partNameListToValList(key.partNames), + Collections.singletonList(key.colName)); + if (css != null && css.size() > 0) { + aggrStats.setPartsFound(css.size()); + for (ColumnStatistics cs : css) { + for (ColumnStatisticsObj cso : cs.getStatsObj()) { + if (statsObj == null) { + statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(key.colName, + cso.getStatsData().getSetField()); + } + if (aggregator == null) { + aggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator( + cso.getStatsData().getSetField()); + } + aggregator.aggregate(statsObj, cso); + } + } + aggrStats.addToColStats(statsObj); + me.put(key, aggrStats); + } + } else { + hbaseHits.incr(); + } + return aggrStats; + } + }); + misses = new Counter("Stats cache table misses"); + hbaseHits = new Counter("Stats cache table hits"); + totalGets = new Counter("Total get calls to the stats cache"); + try { + md = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + invalidator = new Invalidator(); + invalidator.setDaemon(true); + invalidator.start(); + } + + /** + * Add an object to the cache. 
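
The constructor above wires the two new configuration values, HiveConf.ConfVars.METASTORE_HBASE_STATS_CACHE_OBJECTS and METASTORE_HBASE_STATS_CACHE_TTL, into a Guava LoadingCache. The minimal sketch below (not part of the patch; class name, key and value types, and the numeric values are arbitrary placeholders) isolates that pattern: maximumSize bounds how many aggregates stay in memory, expireAfterWrite bounds how long an entry is served before the loader is consulted again, and a miss transparently invokes load(), which in StatsCache is where HBase is read and, failing that, per-partition stats are aggregated.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

import java.util.concurrent.TimeUnit;

public class StatsCacheKnobsSketch {
  public static void main(String[] args) throws Exception {
    LoadingCache<String, String> cache = CacheBuilder.newBuilder()
        // Bounded entry count, analogous to METASTORE_HBASE_STATS_CACHE_OBJECTS.
        .maximumSize(1000)
        // Time-to-live after write, analogous to METASTORE_HBASE_STATS_CACHE_TTL.
        .expireAfterWrite(30, TimeUnit.SECONDS)
        .build(new CacheLoader<String, String>() {
          @Override
          public String load(String key) {
            // StatsCache does its HBase read / re-aggregation here.
            return "aggregated-for-" + key;
          }
        });

    System.out.println(cache.get("col1"));  // first call runs load()
    System.out.println(cache.get("col1"));  // second call is served from memory
  }
}
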
+ * @param key Key for this entry + * @param aggrStats stats + * @throws java.io.IOException + */ + void put(StatsCacheKey key, AggrStats aggrStats) throws IOException { + HBaseReadWrite.getInstance().putAggregatedStats(key.md5, key.dbName, key.tableName, + key.partNames, + key.colName, aggrStats); + cache.put(key, aggrStats); + } + + /** + * Get partition level statistics + * @param dbName name of database table is in + * @param tableName name of table + * @param partNames names of the partitions + * @param colName of column to get stats for + * @return stats object for this column, or null if none cached + * @throws java.io.IOException + */ + + AggrStats get(String dbName, String tableName, List partNames, String colName) + throws IOException { + totalGets.incr(); + StatsCacheKey key = new StatsCacheKey(dbName, tableName, partNames, colName, md); + try { + return cache.get(key); + } catch (ExecutionException e) { + throw new IOException(e); + } + } + + /** + * Remove all entries that are related to a particular set of partitions. This should be + * called when partitions are deleted or stats are updated. + * @param dbName name of database table is in + * @param tableName name of table + * @param partName name of the partition + * @throws IOException + */ + void invalidate(String dbName, String tableName, String partName) + throws IOException { + invalidator.addToQueue( + HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.newBuilder() + .setDbName(ByteString.copyFrom(dbName.getBytes(HBaseUtils.ENCODING))) + .setTableName(ByteString.copyFrom(tableName.getBytes(HBaseUtils.ENCODING))) + .setPartName(ByteString.copyFrom(partName.getBytes(HBaseUtils.ENCODING))) + .build()); + } + + void dumpCounters() { + LOG.debug(misses.dump()); + LOG.debug(hbaseHits.dump()); + LOG.debug(totalGets.dump()); + } + + /** + * Completely dump the cache from memory, used to test that we can access stats from HBase itself. + * @throws IOException + */ + @VisibleForTesting void flushMemory() throws IOException { + cache.invalidateAll(); + } + + @VisibleForTesting void resetCounters() { + misses.clear(); + hbaseHits.clear(); + totalGets.clear(); + } + + @VisibleForTesting void setRunInvalidatorEvery(long runEvery) { + runInvalidatorEvery = runEvery; + } + + @VisibleForTesting void setMaxTimeInCache(long maxTime) { + maxTimeInCache = maxTime; + } + + @VisibleForTesting void wakeInvalidator() throws InterruptedException { + invalidatorHasRun = false; + // Wait through 2 cycles so we're sure our entry won't be picked as too new. 
+ Thread.sleep(2 * runInvalidatorEvery); + invalidator.interrupt(); + while (!invalidatorHasRun) { + Thread.sleep(10); + } + } + + static class StatsCacheKey { + final byte[] md5; + String dbName; + String tableName; + List partNames; + String colName; + + StatsCacheKey(byte[] key) { + md5 = key; + } + + StatsCacheKey(String dbName, String tableName, List partNames, String colName, + MessageDigest md) { + this.dbName = dbName; + this.tableName = tableName; + this.partNames = partNames; + this.colName = colName; + + md.reset(); + md.update(dbName.getBytes(HBaseUtils.ENCODING)); + md.update(tableName.getBytes(HBaseUtils.ENCODING)); + Collections.sort(this.partNames); + for (String s : partNames) { + md.update(s.getBytes(HBaseUtils.ENCODING)); + } + md.update(colName.getBytes(HBaseUtils.ENCODING)); + md5 = md.digest(); + } + + @Override + public boolean equals(Object other) { + if (other == null || !(other instanceof StatsCacheKey)) return false; + StatsCacheKey that = (StatsCacheKey)other; + return Arrays.equals(md5, that.md5); + } + + @Override + public int hashCode() { + return Arrays.hashCode(md5); + } + } + + private class Invalidator extends Thread { + private List entries = new ArrayList<>(); + private Lock lock = new ReentrantLock(); + + void addToQueue(HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry entry) { + lock.lock(); + try { + entries.add(entry); + } finally { + lock.unlock(); + } + } + + @Override + public void run() { + while (true) { + long startedAt = System.currentTimeMillis(); + List thisRun = null; + lock.lock(); + try { + if (entries.size() > 0) { + thisRun = entries; + entries = new ArrayList<>(); + } + } finally { + lock.unlock(); + } + + if (thisRun != null) { + try { + HbaseMetastoreProto.AggrStatsInvalidatorFilter filter = + HbaseMetastoreProto.AggrStatsInvalidatorFilter.newBuilder() + .setRunEvery(runInvalidatorEvery) + .setMaxCacheEntryLife(maxTimeInCache) + .addAllToInvalidate(thisRun) + .build(); + List keys = + HBaseReadWrite.getInstance().invalidateAggregatedStats(filter); + cache.invalidateAll(keys); + } catch (IOException e) { + // Not a lot I can do here + LOG.error("Caught error while invalidating entries in the cache", e); + } + } + invalidatorHasRun = true; + + try { + sleep(runInvalidatorEvery - (System.currentTimeMillis() - startedAt)); + } catch (InterruptedException e) { + LOG.warn("Interupted while sleeping", e); + } + } + } + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java index 3fa0614..f333f57 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java @@ -19,7 +19,15 @@ package org.apache.hadoop.hive.metastore.hbase.stats; +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; 
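
StatsCacheKey above makes partition order irrelevant to cache identity: the constructor sorts the partition names before folding the database name, table name, partition names, and column name into an MD5 digest, and equals() and hashCode() are defined on that digest alone. A small same-package sketch of that behavior follows (not part of the patch; the class name and the database, table, partition, and column names are illustrative only).

package org.apache.hadoop.hive.metastore.hbase;

import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;

class StatsCacheKeyOrderSketch {
  public static void main(String[] args) throws Exception {
    MessageDigest md = MessageDigest.getInstance("MD5");
    StatsCache.StatsCacheKey k1 = new StatsCache.StatsCacheKey("default", "hit",
        new ArrayList<>(Arrays.asList("ds=today", "ds=yesterday")), "col1", md);
    StatsCache.StatsCacheKey k2 = new StatsCache.StatsCacheKey("default", "hit",
        new ArrayList<>(Arrays.asList("ds=yesterday", "ds=today")), "col1", md);
    // Same digest, so both keys land on the same cache entry.
    System.out.println(k1.equals(k2) && k1.hashCode() == k2.hashCode());  // true
  }
}
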
public class ColumnStatsAggregatorFactory { @@ -45,4 +53,47 @@ public static ColumnStatsAggregator getColumnStatsAggregator(_Fields type) { } } + public static ColumnStatisticsObj newColumnStaticsObj(String colName, _Fields type) { + ColumnStatisticsObj cso = new ColumnStatisticsObj(); + ColumnStatisticsData csd = new ColumnStatisticsData(); + cso.setColName(colName); + switch (type) { + case BOOLEAN_STATS: + csd.setBooleanStats(new BooleanColumnStatsData()); + cso.setColType("boolean"); + break; + + case LONG_STATS: + csd.setLongStats(new LongColumnStatsData()); + cso.setColType("long"); + break; + + case DOUBLE_STATS: + csd.setDoubleStats(new DoubleColumnStatsData()); + cso.setColType("double"); + break; + + case STRING_STATS: + csd.setStringStats(new StringColumnStatsData()); + cso.setColType("string"); + break; + + case BINARY_STATS: + csd.setBinaryStats(new BinaryColumnStatsData()); + cso.setColType("binary"); + break; + + case DECIMAL_STATS: + csd.setDecimalStats(new DecimalColumnStatsData()); + cso.setColType("decimal"); + break; + + default: + throw new RuntimeException("Woh, bad. Unknown stats type!"); + } + + cso.setStatsData(csd); + return cso; + } + } diff --git metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto index 0aa0d21..3cd8867 100644 --- metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto +++ metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto @@ -22,6 +22,35 @@ enum PrincipalType { ROLE = 1; } +message AggrStats { + required int64 parts_found = 1; + repeated ColumnStats col_stats = 2; +} + +message AggrStatsBloomFilter { + message BloomFilter { + required int32 num_bits = 1; + required int32 num_funcs = 2; + repeated int64 bits = 3; + } + required bytes db_name = 1; + required bytes table_name = 2; + required BloomFilter bloom_filter = 3; + required int64 aggregated_at = 4; +} + +message AggrStatsInvalidatorFilter { + message Entry { + required bytes db_name = 1; + required bytes table_name = 2; + required bytes part_name = 3; + } + + repeated Entry to_invalidate = 1; + required int64 run_every = 2; + required int64 max_cache_entry_life = 3; +} + message ColumnStats { message BooleanStats { @@ -63,6 +92,7 @@ message ColumnStats { optional StringStats string_stats = 8; optional StringStats binary_stats = 9; optional DecimalStats decimal_stats = 10; + optional string column_name = 11; } message Database { diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java new file mode 100644 index 0000000..af8f5fc --- /dev/null +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java @@ -0,0 +1,316 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+public class TestHBaseAggregateStatsCache {
+  private static final Log LOG = LogFactory.getLog(TestHBaseAggregateStatsCache.class.getName());
+
+  @Mock HTableInterface htable;
+  private HBaseStore store;
+  SortedMap<String, Cell> rows = new TreeMap<>();
+
+  @Before
+  public void before() throws IOException {
+    MockitoAnnotations.initMocks(this);
+    HiveConf conf = new HiveConf();
+    conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true);
+    store = MockUtils.init(conf, htable, rows);
+    store.backdoor().getStatsCache().resetCounters();
+  }
+
+  private static interface Checker {
+    void checkStats(AggrStats aggrStats) throws Exception;
+  }
+
+  // Due to limitations in the Mock infrastructure we use for HBase testing we can only test
+  // this for a single-column table, and we can't really test hits in HBase, only hits in memory
+  // or builds from scratch. But it's still useful to cover many bugs. More in-depth testing
+  // with multiple columns and with HBase hits is done in TestHBaseAggrStatsCacheIntegration.
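+  //
+  // A note on the counters asserted in the tests below (hbaseHits, totalGets, misses, reached
+  // through store.backdoor().getStatsCache()): every get_aggr_stats_for() call bumps totalGets;
+  // a request that has to be built from the raw partition stats bumps misses; a repeat of the
+  // same request is served from memory, so only totalGets moves. Because StatsCacheKey sorts
+  // the partition names, reversing the order of the names in a request still hits the same
+  // cache entry.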
+
+  @Test
+  public void allWithStats() throws Exception {
+    String dbName = "default";
+    String tableName = "hit";
+    List<String> partVals1 = Arrays.asList("today");
+    List<String> partVals2 = Arrays.asList("yesterday");
+    long now = System.currentTimeMillis();
+
+    List<FieldSchema> cols = new ArrayList<>();
+    cols.add(new FieldSchema("col1", "boolean", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, Collections.<String, String>emptyMap());
+    List<FieldSchema> partCols = new ArrayList<>();
+    partCols.add(new FieldSchema("ds", "string", ""));
+    Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols,
+        Collections.<String, String>emptyMap(), null, null, null);
+    store.createTable(table);
+
+    for (List<String> partVals : Arrays.asList(partVals1, partVals2)) {
+      StorageDescriptor psd = new StorageDescriptor(sd);
+      psd.setLocation("file:/tmp/default/hit/ds=" + partVals.get(0));
+      Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd,
+          Collections.<String, String>emptyMap());
+      store.addPartition(part);
+
+      ColumnStatistics cs = new ColumnStatistics();
+      ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName);
+      desc.setLastAnalyzed(now);
+      desc.setPartName("ds=" + partVals.get(0));
+      cs.setStatsDesc(desc);
+      ColumnStatisticsObj obj = new ColumnStatisticsObj();
+      obj.setColName("col1");
+      obj.setColType("boolean");
+      ColumnStatisticsData data = new ColumnStatisticsData();
+      BooleanColumnStatsData bcsd = new BooleanColumnStatsData();
+      bcsd.setNumFalses(10);
+      bcsd.setNumTrues(20);
+      bcsd.setNumNulls(30);
+      data.setBooleanStats(bcsd);
+      obj.setStatsData(data);
+      cs.addToStatsObj(obj);
+
+      store.updatePartitionColumnStatistics(cs, partVals);
+    }
+
+    Checker statChecker = new Checker() {
+      @Override
+      public void checkStats(AggrStats aggrStats) throws Exception {
+        Assert.assertEquals(2, aggrStats.getPartsFound());
+        Assert.assertEquals(1, aggrStats.getColStatsSize());
+        ColumnStatisticsObj cso = aggrStats.getColStats().get(0);
+        Assert.assertEquals("col1", cso.getColName());
+        Assert.assertEquals("boolean", cso.getColType());
+        BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats();
+        Assert.assertEquals(20, bcsd.getNumFalses());
+        Assert.assertEquals(40, bcsd.getNumTrues());
+        Assert.assertEquals(60, bcsd.getNumNulls());
+      }
+    };
+
+    AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName,
+        Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1"));
+    statChecker.checkStats(aggrStats);
+
+    // Check that we had to build it from the stats
+    Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt());
+    Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt());
+    Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt());
+
+    // Call again; this time it should come from memory. Also, reverse the name order this time
+    // to ensure that we still hit.
+    aggrStats = store.get_aggr_stats_for(dbName, tableName,
+        Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1"));
+    statChecker.checkStats(aggrStats);
+
+    Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt());
+    Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt());
+    Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt());
+  }
+
+
+  @Test
+  public void noneWithStats() throws Exception {
+    String dbName = "default";
+    String tableName = "nws";
+    List<String> partVals1 = Arrays.asList("today");
+    List<String> partVals2 = Arrays.asList("yesterday");
+    long now = System.currentTimeMillis();
+
+    List<FieldSchema> cols = new ArrayList<>();
+    cols.add(new FieldSchema("col1", "boolean", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, Collections.<String, String>emptyMap());
+    List<FieldSchema> partCols = new ArrayList<>();
+    partCols.add(new FieldSchema("ds", "string", ""));
+    Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols,
+        Collections.<String, String>emptyMap(), null, null, null);
+    store.createTable(table);
+
+    for (List<String> partVals : Arrays.asList(partVals1, partVals2)) {
+      StorageDescriptor psd = new StorageDescriptor(sd);
+      psd.setLocation("file:/tmp/default/nws/ds=" + partVals.get(0));
+      Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd,
+          Collections.<String, String>emptyMap());
+      store.addPartition(part);
+    }
+
+    Checker statChecker = new Checker() {
+      @Override
+      public void checkStats(AggrStats aggrStats) throws Exception {
+        Assert.assertEquals(0, aggrStats.getPartsFound());
+      }
+    };
+
+    AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName,
+        Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1"));
+    statChecker.checkStats(aggrStats);
+  }
+
+  @Test
+  public void someNonexistentPartitions() throws Exception {
+    String dbName = "default";
+    String tableName = "snp";
+    List<String> partVals1 = Arrays.asList("today");
+    List<String> partVals2 = Arrays.asList("yesterday");
+    long now = System.currentTimeMillis();
+
+    List<FieldSchema> cols = new ArrayList<>();
+    cols.add(new FieldSchema("col1", "boolean", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, Collections.<String, String>emptyMap());
+    List<FieldSchema> partCols = new ArrayList<>();
+    partCols.add(new FieldSchema("ds", "string", ""));
+    Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols,
+        Collections.<String, String>emptyMap(), null, null, null);
+    store.createTable(table);
+
+    StorageDescriptor psd = new StorageDescriptor(sd);
+    psd.setLocation("file:/tmp/default/hit/ds=" + partVals1.get(0));
+    Partition part = new Partition(partVals1, dbName, tableName, (int) now, (int) now, psd,
+        Collections.<String, String>emptyMap());
+    store.addPartition(part);
+
+    ColumnStatistics cs = new ColumnStatistics();
+    ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName);
+    desc.setLastAnalyzed(now);
+    desc.setPartName("ds=" + partVals1.get(0));
+    cs.setStatsDesc(desc);
+    ColumnStatisticsObj obj = new ColumnStatisticsObj();
+    obj.setColName("col1");
+    obj.setColType("double");
+    ColumnStatisticsData data = new ColumnStatisticsData();
+    DoubleColumnStatsData dcsd = new DoubleColumnStatsData();
+    dcsd.setHighValue(1000.2342343);
+    dcsd.setLowValue(-20.1234213423);
+    dcsd.setNumNulls(30);
+    dcsd.setNumDVs(12342);
+    data.setDoubleStats(dcsd);
+    obj.setStatsData(data);
+    cs.addToStatsObj(obj);
+
+    store.updatePartitionColumnStatistics(cs, partVals1);
+
+    Checker statChecker = new Checker() {
+      @Override
+      public void checkStats(AggrStats aggrStats) throws Exception {
+        Assert.assertEquals(1, aggrStats.getPartsFound());
+        Assert.assertEquals(1, aggrStats.getColStatsSize());
+        ColumnStatisticsObj cso = aggrStats.getColStats().get(0);
+        Assert.assertEquals("col1", cso.getColName());
+        Assert.assertEquals("double", cso.getColType());
+        DoubleColumnStatsData dcsd = cso.getStatsData().getDoubleStats();
+        Assert.assertEquals(1000.23, dcsd.getHighValue(), 0.01);
+        Assert.assertEquals(-20.12, dcsd.getLowValue(), 0.01);
+        Assert.assertEquals(30, dcsd.getNumNulls());
+        Assert.assertEquals(12342, dcsd.getNumDVs());
+      }
+    };
+
+    AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName,
+        Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1"));
+    statChecker.checkStats(aggrStats);
+
+    // Check that we had to build it from the stats
+    Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt());
+    Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt());
+    Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt());
+
+    // Call again; this time it should come from memory. Also, reverse the name order this time
+    // to ensure that we still hit.
+    aggrStats = store.get_aggr_stats_for(dbName, tableName,
+        Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1"));
+    statChecker.checkStats(aggrStats);
+
+    Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt());
+    Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt());
+    Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt());
+  }
+
+  @Test
+  public void nonexistentPartitions() throws Exception {
+    String dbName = "default";
+    String tableName = "nep";
+    List<String> partVals1 = Arrays.asList("today");
+    List<String> partVals2 = Arrays.asList("yesterday");
+    long now = System.currentTimeMillis();
+
+    List<FieldSchema> cols = new ArrayList<>();
+    cols.add(new FieldSchema("col1", "boolean", "nocomment"));
+    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+        serde, null, null, Collections.<String, String>emptyMap());
+    List<FieldSchema> partCols = new ArrayList<>();
+    partCols.add(new FieldSchema("ds", "string", ""));
+    Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols,
+        Collections.<String, String>emptyMap(), null, null, null);
+    store.createTable(table);
+
+    Checker statChecker = new Checker() {
+      @Override
+      public void checkStats(AggrStats aggrStats) throws Exception {
+        Assert.assertEquals(0, aggrStats.getPartsFound());
+      }
+    };
+
+    AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName,
+        Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1"));
+    statChecker.checkStats(aggrStats);
+
+    // Check that we had to build it from the stats
+    Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt());
+    Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt());
+    Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt());
+  }
+  // TODO test invalidation
+}
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
index 92c9ba4..9878499 100644
--- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
+++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
@@ -124,7 +124,7 @@
   @Rule public ExpectedException thrown = ExpectedException.none();
 
   @Mock HTableInterface htable;
-  SortedMap<String, Cell> rows = new TreeMap<String, Cell>();
+  SortedMap<String, Cell> rows = new TreeMap<>();
   HBaseStore store;
 
--
1.7.5.4
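
Reviewer's note (not part of the patch above): the sketch below is one illustrative way to populate the AggrStatsInvalidatorFilter message defined in hbase_metastore_proto.proto, which StatsCache's Invalidator hands to HBaseReadWrite.invalidateAggregatedStats(). The literal values and variable names are made up; the Entry setters are the standard protobuf-generated ones for its bytes fields, and the snippet assumes imports of com.google.protobuf.ByteString, java.util.Collections, the generated HbaseMetastoreProto class, and HBaseUtils.

    // One entry per partition whose stats should no longer be served from the cache
    // (database, table, and partition names are illustrative).
    HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry entry =
        HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.newBuilder()
            .setDbName(ByteString.copyFrom("default".getBytes(HBaseUtils.ENCODING)))
            .setTableName(ByteString.copyFrom("hit".getBytes(HBaseUtils.ENCODING)))
            .setPartName(ByteString.copyFrom("ds=today".getBytes(HBaseUtils.ENCODING)))
            .build();

    // The Invalidator batches queued entries into a filter like this; the two times are in
    // milliseconds, matching the sleep arithmetic in its run() loop.
    HbaseMetastoreProto.AggrStatsInvalidatorFilter filter =
        HbaseMetastoreProto.AggrStatsInvalidatorFilter.newBuilder()
            .addAllToInvalidate(Collections.singletonList(entry))
            .setRunEvery(5000)
            .setMaxCacheEntryLife(3600000)
            .build();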