commit e56b40207db465109e4ebaf8e69fda9563c75679 Author: Alan Gates Date: Tue Feb 24 21:29:38 2015 -0800 HIVE-9783 Initial patch to use protocol buffers, moved roles only. HIVE-9783 Moved PrincipalPrivilegeSet to protobuf HIVE-9783 Converted databases to protobuf HIVE-9783 Removed RoleList HIVE-9783 Remove GrantInfoWritable and GrantInfoList. Doesn't work yet, NPE in many tests. HIVE-9783 Fixed unit test failures with role grants. HIVE-9783 Moved StorageDescriptor to protobuf HIVE-9783 Moved Partition to protobuf HIVE-9783 Moved table to protobuf HIVE-9763 Fixed unit test broken by changes. HIVE-9783 Moved stats to protobuf, finally done. HIVE-9802 Refactored HBase connection management into HBaseConnection. Haven't tested against an actual cluster yet. HIVE-9783 Updated changes in listRolesWithGrants to reflect protocol buffer changes. HIVE-9802 Removed checkAndPut and checkAndDelete since they are not supported by transactional layers and shouldn't be required once transactions are in. HIVE-9783 Fixed bug in hashing for storage descriptors that made them not work with protocol buffer. Also fixed bug where they were improperly being incremented on some alter statements. Conflicts: metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java HIVE-9783 Fixed NPE in SD hash calculation. HIVE-9802 Fixed issue with createTablesIfNotExists not actually creating the tables. Also made it so that this is only called in test scenarios to save time in production. Added new command to HBaseSchemaTool to explicitly do install and create the tables. HIVE-9783 Fixed serialization and hash issues with skewed info. diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index be6c64a..829d5b7 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -383,6 +383,9 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { METASTORE_HBASE_CACHE_TIME_TO_LIVE("hive.metastore.hbase.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS), "Number of seconds for stats items to live in the cache"), + METASTORE_HBASE_CONNECTION_CLASS("hive.metastore.hbase.connection.class", + "org.apache.hadoop.hive.metastore.hbase.VanillaHBaseConnection", + "Class used to connect to HBase"), METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3, "Number of retries while opening a connection to metastore"), diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java index e8225da..8eb007f 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java @@ -67,7 +67,7 @@ private static Map emptyParameters = new HashMap(); @Rule public ExpectedException thrown = ExpectedException.none(); - @Mock private HConnection hconn; + @Mock private HBaseConnection hconn; private HBaseStore store; private HiveConf conf; @@ -96,14 +96,15 @@ public static void shutdownMiniCluster() throws Exception { @Before public void setupConnection() throws IOException { MockitoAnnotations.initMocks(this); - Mockito.when(hconn.getTable(HBaseReadWrite.SD_TABLE)).thenReturn(sdTable); - Mockito.when(hconn.getTable(HBaseReadWrite.TABLE_TABLE)).thenReturn(tblTable); -
Mockito.when(hconn.getTable(HBaseReadWrite.PART_TABLE)).thenReturn(partTable); - Mockito.when(hconn.getTable(HBaseReadWrite.DB_TABLE)).thenReturn(dbTable); - Mockito.when(hconn.getTable(HBaseReadWrite.ROLE_TABLE)).thenReturn(roleTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.SD_TABLE)).thenReturn(sdTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.TABLE_TABLE)).thenReturn(tblTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.PART_TABLE)).thenReturn(partTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.DB_TABLE)).thenReturn(dbTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.ROLE_TABLE)).thenReturn(roleTable); conf = new HiveConf(); // Turn off caching, as we want to test actual interaction with HBase conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); + conf.setVar(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN); HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf); hbase.setConnection(hconn); store = new HBaseStore(); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java index 0e508db..24e8c65 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java @@ -92,7 +92,7 @@ private static Map emptyParameters = new HashMap(); @Rule public ExpectedException thrown = ExpectedException.none(); - @Mock private HConnection hconn; + @Mock private HBaseConnection hconn; private HBaseStore store; private HiveConf conf; @@ -127,16 +127,17 @@ public static void shutdownMiniCluster() throws Exception { @Before public void setupConnection() throws IOException { MockitoAnnotations.initMocks(this); - Mockito.when(hconn.getTable(HBaseReadWrite.SD_TABLE)).thenReturn(sdTable); - Mockito.when(hconn.getTable(HBaseReadWrite.TABLE_TABLE)).thenReturn(tblTable); - Mockito.when(hconn.getTable(HBaseReadWrite.PART_TABLE)).thenReturn(partTable); - Mockito.when(hconn.getTable(HBaseReadWrite.DB_TABLE)).thenReturn(dbTable); - Mockito.when(hconn.getTable(HBaseReadWrite.ROLE_TABLE)).thenReturn(roleTable); - Mockito.when(hconn.getTable(HBaseReadWrite.GLOBAL_PRIVS_TABLE)).thenReturn(globalPrivsTable); - Mockito.when(hconn.getTable(HBaseReadWrite.USER_TO_ROLE_TABLE)).thenReturn(principalRoleMapTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.SD_TABLE)).thenReturn(sdTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.TABLE_TABLE)).thenReturn(tblTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.PART_TABLE)).thenReturn(partTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.DB_TABLE)).thenReturn(dbTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.ROLE_TABLE)).thenReturn(roleTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.GLOBAL_PRIVS_TABLE)).thenReturn(globalPrivsTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.USER_TO_ROLE_TABLE)).thenReturn(principalRoleMapTable); conf = new HiveConf(); // Turn off caching, as we want to test actual interaction with HBase conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); + conf.setVar(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN); HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf); hbase.setConnection(hconn); store = new HBaseStore(); @@ -249,9 +250,11 @@ public void alterTable() throws Exception { startTime += 10; 
table.setLastAccessTime(startTime); + LOG.debug("XXX alter table test"); store.alterTable("default", tableName, table); Table t = store.getTable("default", tableName); + LOG.debug("Alter table time " + t.getLastAccessTime()); Assert.assertEquals(1, t.getSd().getColsSize()); Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); @@ -699,7 +702,8 @@ public void grantRevokeRoles() throws Exception { Assert.assertEquals(1, grants.size()); Assert.assertEquals("fred", grants.get(0).getPrincipalName()); Assert.assertEquals(PrincipalType.USER, grants.get(0).getPrincipalType()); - Assert.assertTrue(grants.get(0).getGrantTime() >= now); + Assert.assertTrue("Expected grant time of " + now + " got " + grants.get(0).getGrantTime(), + grants.get(0).getGrantTime() >= now); Assert.assertEquals("bob", grants.get(0).getGrantorName()); Assert.assertEquals(PrincipalType.USER, grants.get(0).getGrantorPrincipalType()); Assert.assertFalse(grants.get(0).isGrantOption()); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java index b9b54a2..57c62ca 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java @@ -40,6 +40,8 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import java.io.IOException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -64,9 +66,10 @@ private static Map emptyParameters = new HashMap(); @Rule public ExpectedException thrown = ExpectedException.none(); - @Mock private HConnection hconn; + @Mock private HBaseConnection hconn; private HBaseStore store; private HiveConf conf; + private MessageDigest md; @BeforeClass public static void startMiniCluster() throws Exception { @@ -99,20 +102,27 @@ public static void shutdownMiniCluster() throws Exception { @Before public void setupConnection() throws IOException { MockitoAnnotations.initMocks(this); - Mockito.when(hconn.getTable(HBaseReadWrite.SD_TABLE)).thenReturn(sdTable); - Mockito.when(hconn.getTable(HBaseReadWrite.TABLE_TABLE)).thenReturn(tblTable); - Mockito.when(hconn.getTable(HBaseReadWrite.PART_TABLE)).thenReturn(partTable); - Mockito.when(hconn.getTable(HBaseReadWrite.DB_TABLE)).thenReturn(dbTable); - Mockito.when(hconn.getTable(HBaseReadWrite.ROLE_TABLE)).thenReturn(roleTable); - Mockito.when(hconn.getTable(HBaseReadWrite.GLOBAL_PRIVS_TABLE)).thenReturn(globalPrivsTable); - Mockito.when(hconn.getTable(HBaseReadWrite.USER_TO_ROLE_TABLE)).thenReturn(principalRoleMapTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.SD_TABLE)).thenReturn(sdTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.TABLE_TABLE)).thenReturn(tblTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.PART_TABLE)).thenReturn(partTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.DB_TABLE)).thenReturn(dbTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.ROLE_TABLE)).thenReturn(roleTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.GLOBAL_PRIVS_TABLE)).thenReturn(globalPrivsTable); + Mockito.when(hconn.getHBaseTable(HBaseReadWrite.USER_TO_ROLE_TABLE)).thenReturn(principalRoleMapTable); conf = new 
HiveConf(); // Turn off caching, as we want to test actual interaction with HBase conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); + conf.setVar(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN); HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf); hbase.setConnection(hconn); store = new HBaseStore(); store.setConf(conf); + + try { + md = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } } @Test @@ -147,13 +157,93 @@ public void createManyPartitions() throws Exception { Assert.assertEquals(1, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + String tableName2 = "differentTable"; sd = new StorageDescriptor(cols, "file:/tmp", "input2", "output", false, 0, serde, null, null, emptyParameters); - table = new Table("differenttable", "default", "me", startTime, startTime, 0, sd, null, + table = new Table(tableName2, "default", "me", startTime, startTime, 0, sd, null, emptyParameters, null, null, null); store.createTable(table); Assert.assertEquals(2, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + // Drop one of the partitions and make sure it doesn't drop the storage descriptor + store.dropPartition(dbName, tableName, Arrays.asList(partVals.get(0))); + Assert.assertEquals(2, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + + // Alter the second table in a few ways to make sure it changes its descriptor properly + table = store.getTable(dbName, tableName2); + byte[] sdHash = HBaseUtils.hashStorageDescriptor(table.getSd(), md); + + // Alter the table without touching the storage descriptor + table.setLastAccessTime(startTime + 1); + store.alterTable(dbName, tableName2, table); + Assert.assertEquals(2, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + table = store.getTable(dbName, tableName2); + byte[] alteredHash = HBaseUtils.hashStorageDescriptor(table.getSd(), md); + Assert.assertArrayEquals(sdHash, alteredHash); + + // Alter the table, changing the storage descriptor + table.getSd().setOutputFormat("output_changed"); + store.alterTable(dbName, tableName2, table); + Assert.assertEquals(2, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + table = store.getTable(dbName, tableName2); + alteredHash = HBaseUtils.hashStorageDescriptor(table.getSd(), md); + Assert.assertFalse(Arrays.equals(sdHash, alteredHash)); + + // Alter one of the partitions without touching the storage descriptor + Partition part = store.getPartition(dbName, tableName, Arrays.asList(partVals.get(1))); + sdHash = HBaseUtils.hashStorageDescriptor(part.getSd(), md); + part.setLastAccessTime(part.getLastAccessTime() + 1); + store.alterPartition(dbName, tableName, Arrays.asList(partVals.get(1)), part); + Assert.assertEquals(2, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + part = store.getPartition(dbName, tableName, Arrays.asList(partVals.get(1))); + alteredHash = HBaseUtils.hashStorageDescriptor(part.getSd(), md); + Assert.assertArrayEquals(sdHash, alteredHash); + + // Alter the partition, changing the storage descriptor + part.getSd().setOutputFormat("output_changed_some_more"); + store.alterPartition(dbName, tableName, Arrays.asList(partVals.get(1)), part); + Assert.assertEquals(3, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + part = store.getPartition(dbName, tableName, Arrays.asList(partVals.get(1))); + alteredHash = HBaseUtils.hashStorageDescriptor(part.getSd(), md); + Assert.assertFalse(Arrays.equals(sdHash, alteredHash)); + + // Alter
multiple partitions without touching the storage descriptors + List parts = store.getPartitions(dbName, tableName, -1); + sdHash = HBaseUtils.hashStorageDescriptor(parts.get(1).getSd(), md); + for (int i = 1; i < 3; i++) { + parts.get(i).setLastAccessTime(97); + } + List<List<String>> listPartVals = new ArrayList<List<String>>(); + for (String pv : partVals.subList(1, partVals.size())) { + listPartVals.add(Arrays.asList(pv)); + } + store.alterPartitions(dbName, tableName, listPartVals, parts); + Assert.assertEquals(3, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + parts = store.getPartitions(dbName, tableName, -1); + alteredHash = HBaseUtils.hashStorageDescriptor(parts.get(1).getSd(), md); + Assert.assertArrayEquals(sdHash, alteredHash); + + // Alter multiple partitions changing the storage descriptors + parts = store.getPartitions(dbName, tableName, -1); + sdHash = HBaseUtils.hashStorageDescriptor(parts.get(1).getSd(), md); + for (int i = 1; i < 3; i++) { + parts.get(i).getSd().setOutputFormat("yet_a_different_of"); + } + store.alterPartitions(dbName, tableName, listPartVals, parts); + Assert.assertEquals(4, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + parts = store.getPartitions(dbName, tableName, -1); + alteredHash = HBaseUtils.hashStorageDescriptor(parts.get(1).getSd(), md); + Assert.assertFalse(Arrays.equals(sdHash, alteredHash)); + + for (String partVal : partVals.subList(1, partVals.size())) { + store.dropPartition(dbName, tableName, Arrays.asList(partVal)); + } + store.dropTable(dbName, tableName); + store.dropTable(dbName, tableName2); + + Assert.assertEquals(0, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + + } } diff --git metastore/pom.xml metastore/pom.xml index a7d5a71..adde0b5 100644 --- metastore/pom.xml +++ metastore/pom.xml @@ -51,6 +51,11 @@ ${guava.version} + com.google.protobuf + protobuf-java + ${protobuf.version} + + com.jolbox bonecp ${bonecp.version} @@ -203,6 +208,39 @@ + + protobuf + + + + org.apache.maven.plugins + maven-antrun-plugin + + + generate-protobuf-sources + generate-sources + + + + + Building HBase Metastore Protobuf + + + + + + + + + + run + + + + + + + @@ -257,6 +295,7 @@ src/model src/gen/thrift/gen-javabean + src/gen/protobuf/gen-java diff --git metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java new file mode 100644 index 0000000..a4ff8a3 --- /dev/null +++ metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java @@ -0,0 +1,24244 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: hbase_metastore_proto.proto + +package org.apache.hadoop.hive.metastore.hbase; + +public final class HbaseMetastoreProto { + private HbaseMetastoreProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + /** + * Protobuf enum {@code org.apache.hadoop.hive.metastore.hbase.PrincipalType} + */ + public enum PrincipalType + implements com.google.protobuf.ProtocolMessageEnum { + /** + * USER = 0; + */ + USER(0, 0), + /** + * ROLE = 1; + */ + ROLE(1, 1), + ; + + /** + * USER = 0; + */ + public static final int USER_VALUE = 0; + /** + * ROLE = 1; + */ + public static final int ROLE_VALUE = 1; + + + public final int getNumber() { return value; } + + public static PrincipalType valueOf(int value) { + switch (value) { + case 0: return USER; + case 1: return ROLE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public PrincipalType findValueByNumber(int number) { + return PrincipalType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.getDescriptor().getEnumTypes().get(0); + } + + private static final PrincipalType[] VALUES = values(); + + public static PrincipalType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private PrincipalType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalType) + } + + public interface ColumnStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 last_analyzed = 1; + /** + * optional int64 last_analyzed = 1; + */ + boolean hasLastAnalyzed(); + /** + * optional int64 last_analyzed = 1; + */ + long getLastAnalyzed(); + + // required string column_type = 2; + /** + * required string column_type = 2; + */ + boolean hasColumnType(); + /** + * required string column_type = 2; + */ + java.lang.String getColumnType(); + /** + * required string column_type = 2; + */ + com.google.protobuf.ByteString + getColumnTypeBytes(); + + // optional int64 num_nulls = 3; + /** + * optional int64 num_nulls = 3; + */ + boolean hasNumNulls(); + /** + * optional int64 num_nulls = 3; + */ + long getNumNulls(); + + // optional int64 num_distinct_values = 4; + /** + * optional int64 num_distinct_values = 4; + */ + boolean hasNumDistinctValues(); + /** + * optional int64 num_distinct_values = 4; + */ + long getNumDistinctValues(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + boolean hasBoolStats(); + /** + * optional 
.org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats getBoolStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder getBoolStatsOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + boolean hasLongStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats getLongStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder getLongStatsOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + boolean hasDoubleStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats getDoubleStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder getDoubleStatsOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + boolean hasStringStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getStringStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getStringStatsOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + boolean hasBinaryStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getBinaryStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getBinaryStatsOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + boolean hasDecimalStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats getDecimalStats(); + /** + * optional 
.org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder getDecimalStatsOrBuilder(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats} + */ + public static final class ColumnStats extends + com.google.protobuf.GeneratedMessage + implements ColumnStatsOrBuilder { + // Use ColumnStats.newBuilder() to construct. + private ColumnStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ColumnStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ColumnStats defaultInstance; + public static ColumnStats getDefaultInstance() { + return defaultInstance; + } + + public ColumnStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ColumnStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + lastAnalyzed_ = input.readInt64(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + columnType_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + numNulls_ = input.readInt64(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + numDistinctValues_ = input.readInt64(); + break; + } + case 42: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = boolStats_.toBuilder(); + } + boolStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(boolStats_); + boolStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; + break; + } + case 50: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + subBuilder = longStats_.toBuilder(); + } + longStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(longStats_); + longStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000020; + break; + } + case 58: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000040) == 0x00000040)) { + subBuilder = doubleStats_.toBuilder(); + } + doubleStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.PARSER, extensionRegistry); + if (subBuilder != null) { + 
subBuilder.mergeFrom(doubleStats_); + doubleStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000040; + break; + } + case 66: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000080) == 0x00000080)) { + subBuilder = stringStats_.toBuilder(); + } + stringStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(stringStats_); + stringStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000080; + break; + } + case 74: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000100) == 0x00000100)) { + subBuilder = binaryStats_.toBuilder(); + } + binaryStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(binaryStats_); + binaryStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000100; + break; + } + case 82: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000200) == 0x00000200)) { + subBuilder = decimalStats_.toBuilder(); + } + decimalStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(decimalStats_); + decimalStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000200; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ColumnStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ColumnStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface BooleanStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 num_trues = 1; + /** + * optional int64 num_trues = 1; + */ + boolean hasNumTrues(); + /** + * optional int64 num_trues = 1; + */ + long getNumTrues(); + + // optional int64 num_falses = 2; + /** + * optional 
int64 num_falses = 2; + */ + boolean hasNumFalses(); + /** + * optional int64 num_falses = 2; + */ + long getNumFalses(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats} + */ + public static final class BooleanStats extends + com.google.protobuf.GeneratedMessage + implements BooleanStatsOrBuilder { + // Use BooleanStats.newBuilder() to construct. + private BooleanStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BooleanStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BooleanStats defaultInstance; + public static BooleanStats getDefaultInstance() { + return defaultInstance; + } + + public BooleanStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BooleanStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + numTrues_ = input.readInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + numFalses_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BooleanStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BooleanStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional int64 num_trues = 1; + public static final int 
NUM_TRUES_FIELD_NUMBER = 1; + private long numTrues_; + /** + * optional int64 num_trues = 1; + */ + public boolean hasNumTrues() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 num_trues = 1; + */ + public long getNumTrues() { + return numTrues_; + } + + // optional int64 num_falses = 2; + public static final int NUM_FALSES_FIELD_NUMBER = 2; + private long numFalses_; + /** + * optional int64 num_falses = 2; + */ + public boolean hasNumFalses() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 num_falses = 2; + */ + public long getNumFalses() { + return numFalses_; + } + + private void initFields() { + numTrues_ = 0L; + numFalses_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, numTrues_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(2, numFalses_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, numTrues_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, numFalses_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
+ throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + numTrues_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + numFalses_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + 
public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.numTrues_ = numTrues_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.numFalses_ = numFalses_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance()) return this; + if (other.hasNumTrues()) { + setNumTrues(other.getNumTrues()); + } + if (other.hasNumFalses()) { + setNumFalses(other.getNumFalses()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 num_trues = 1; + private long numTrues_ ; + /** + * optional int64 num_trues = 1; + */ + public boolean hasNumTrues() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 num_trues = 1; + */ + public long getNumTrues() { + return numTrues_; + } + /** + * optional int64 num_trues = 1; + */ + public Builder setNumTrues(long value) { + bitField0_ |= 0x00000001; + numTrues_ = value; + onChanged(); + return this; + } + /** + * optional int64 
num_trues = 1; + */ + public Builder clearNumTrues() { + bitField0_ = (bitField0_ & ~0x00000001); + numTrues_ = 0L; + onChanged(); + return this; + } + + // optional int64 num_falses = 2; + private long numFalses_ ; + /** + * optional int64 num_falses = 2; + */ + public boolean hasNumFalses() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 num_falses = 2; + */ + public long getNumFalses() { + return numFalses_; + } + /** + * optional int64 num_falses = 2; + */ + public Builder setNumFalses(long value) { + bitField0_ |= 0x00000002; + numFalses_ = value; + onChanged(); + return this; + } + /** + * optional int64 num_falses = 2; + */ + public Builder clearNumFalses() { + bitField0_ = (bitField0_ & ~0x00000002); + numFalses_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats) + } + + static { + defaultInstance = new BooleanStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats) + } + + public interface LongStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional sint64 low_value = 1; + /** + * optional sint64 low_value = 1; + */ + boolean hasLowValue(); + /** + * optional sint64 low_value = 1; + */ + long getLowValue(); + + // optional sint64 high_value = 2; + /** + * optional sint64 high_value = 2; + */ + boolean hasHighValue(); + /** + * optional sint64 high_value = 2; + */ + long getHighValue(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats} + */ + public static final class LongStats extends + com.google.protobuf.GeneratedMessage + implements LongStatsOrBuilder { + // Use LongStats.newBuilder() to construct. 
+ private LongStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private LongStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final LongStats defaultInstance; + public static LongStats getDefaultInstance() { + return defaultInstance; + } + + public LongStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private LongStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + lowValue_ = input.readSInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + highValue_ = input.readSInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public LongStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new LongStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional sint64 low_value = 1; + public static final int LOW_VALUE_FIELD_NUMBER = 1; + private long lowValue_; + /** + * optional sint64 low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional sint64 low_value = 1; + */ + public long getLowValue() { + return lowValue_; + } + + // optional sint64 high_value = 2; + public static final int HIGH_VALUE_FIELD_NUMBER = 2; + private long highValue_; + /** + * optional sint64 
high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional sint64 high_value = 2; + */ + public long getHighValue() { + return highValue_; + } + + private void initFields() { + lowValue_ = 0L; + highValue_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeSInt64(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeSInt64(2, highValue_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeSInt64Size(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeSInt64Size(2, highValue_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseDelimitedFrom( + 
java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + lowValue_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + highValue_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats getDefaultInstanceForType() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.lowValue_ = lowValue_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.highValue_ = highValue_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance()) return this; + if (other.hasLowValue()) { + setLowValue(other.getLowValue()); + } + if (other.hasHighValue()) { + setHighValue(other.getHighValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional sint64 low_value = 1; + private long lowValue_ ; + /** + * optional sint64 low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional sint64 low_value = 1; + */ + public long getLowValue() { + return lowValue_; + } + /** + * optional sint64 low_value = 1; + */ + public Builder setLowValue(long value) { + bitField0_ |= 0x00000001; + lowValue_ = value; + onChanged(); + return this; + } + /** + * optional sint64 low_value = 1; + */ + public Builder clearLowValue() { + bitField0_ = (bitField0_ & ~0x00000001); + lowValue_ = 0L; + onChanged(); + return this; + } + + // optional sint64 high_value = 2; + private long highValue_ ; + /** + * optional sint64 high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional sint64 high_value = 2; + */ + 
public long getHighValue() { + return highValue_; + } + /** + * optional sint64 high_value = 2; + */ + public Builder setHighValue(long value) { + bitField0_ |= 0x00000002; + highValue_ = value; + onChanged(); + return this; + } + /** + * optional sint64 high_value = 2; + */ + public Builder clearHighValue() { + bitField0_ = (bitField0_ & ~0x00000002); + highValue_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats) + } + + static { + defaultInstance = new LongStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats) + } + + public interface DoubleStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional double low_value = 1; + /** + * optional double low_value = 1; + */ + boolean hasLowValue(); + /** + * optional double low_value = 1; + */ + double getLowValue(); + + // optional double high_value = 2; + /** + * optional double high_value = 2; + */ + boolean hasHighValue(); + /** + * optional double high_value = 2; + */ + double getHighValue(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats} + */ + public static final class DoubleStats extends + com.google.protobuf.GeneratedMessage + implements DoubleStatsOrBuilder { + // Use DoubleStats.newBuilder() to construct. + private DoubleStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private DoubleStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final DoubleStats defaultInstance; + public static DoubleStats getDefaultInstance() { + return defaultInstance; + } + + public DoubleStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DoubleStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 9: { + bitField0_ |= 0x00000001; + lowValue_ = input.readDouble(); + break; + } + case 17: { + bitField0_ |= 0x00000002; + highValue_ = input.readDouble(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DoubleStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DoubleStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional double low_value = 1; + public static final int LOW_VALUE_FIELD_NUMBER = 1; + private double lowValue_; + /** + * optional double low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional double low_value = 1; + */ + public double getLowValue() { + return lowValue_; + } + + // optional double high_value = 2; + public static final int HIGH_VALUE_FIELD_NUMBER = 2; + private double highValue_; + /** + * optional double high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double high_value = 2; + */ + public double getHighValue() { + return highValue_; + } + + private void initFields() { + lowValue_ = 0D; + highValue_ = 0D; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeDouble(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeDouble(2, highValue_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(2, highValue_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + lowValue_ = 0D; + bitField0_ = (bitField0_ & ~0x00000001); + highValue_ = 0D; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.lowValue_ = lowValue_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.highValue_ = highValue_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance()) return this; + if (other.hasLowValue()) { + setLowValue(other.getLowValue()); + } + if (other.hasHighValue()) { + setHighValue(other.getHighValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder 
mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional double low_value = 1; + private double lowValue_ ; + /** + * optional double low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional double low_value = 1; + */ + public double getLowValue() { + return lowValue_; + } + /** + * optional double low_value = 1; + */ + public Builder setLowValue(double value) { + bitField0_ |= 0x00000001; + lowValue_ = value; + onChanged(); + return this; + } + /** + * optional double low_value = 1; + */ + public Builder clearLowValue() { + bitField0_ = (bitField0_ & ~0x00000001); + lowValue_ = 0D; + onChanged(); + return this; + } + + // optional double high_value = 2; + private double highValue_ ; + /** + * optional double high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double high_value = 2; + */ + public double getHighValue() { + return highValue_; + } + /** + * optional double high_value = 2; + */ + public Builder setHighValue(double value) { + bitField0_ |= 0x00000002; + highValue_ = value; + onChanged(); + return this; + } + /** + * optional double high_value = 2; + */ + public Builder clearHighValue() { + bitField0_ = (bitField0_ & ~0x00000002); + highValue_ = 0D; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats) + } + + static { + defaultInstance = new DoubleStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats) + } + + public interface StringStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 max_col_length = 1; + /** + * optional int64 max_col_length = 1; + */ + boolean hasMaxColLength(); + /** + * optional int64 max_col_length = 1; + */ + long getMaxColLength(); + + // optional double avg_col_length = 2; + /** + * optional double avg_col_length = 2; + */ + boolean hasAvgColLength(); + /** + * optional double avg_col_length = 2; + */ + double getAvgColLength(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats} + */ + public static final class StringStats extends + com.google.protobuf.GeneratedMessage + implements StringStatsOrBuilder { + // Use StringStats.newBuilder() to construct. 
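(Illustrative note, not part of the patch: the LongStats and DoubleStats messages above follow the usual protobuf 2.x pattern of an immutable message plus a nested Builder. A minimal round-trip sketch, assuming only the generated API visible in this diff plus the standard base-class method toByteString(), could look like the following.)

    // Sketch only: round-trip a ColumnStats.LongStats through its wire format.
    // Uses the generated newBuilder()/setters/parseFrom(ByteString) shown above;
    // toByteString() comes from the protobuf 2.x message base class.
    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats;

    public class LongStatsRoundTrip {
      public static void main(String[] args) throws Exception {
        LongStats stats = LongStats.newBuilder()
            .setLowValue(-42L)       // optional sint64 low_value = 1
            .setHighValue(9876L)     // optional sint64 high_value = 2
            .build();

        ByteString bytes = stats.toByteString();        // serialize
        LongStats parsed = LongStats.parseFrom(bytes);  // generated parseFrom(ByteString)

        assert parsed.hasLowValue() && parsed.getLowValue() == -42L;
        assert parsed.hasHighValue() && parsed.getHighValue() == 9876L;
      }
    }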
+ private StringStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StringStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StringStats defaultInstance; + public static StringStats getDefaultInstance() { + return defaultInstance; + } + + public StringStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StringStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + maxColLength_ = input.readInt64(); + break; + } + case 17: { + bitField0_ |= 0x00000002; + avgColLength_ = input.readDouble(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StringStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StringStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional int64 max_col_length = 1; + public static final int MAX_COL_LENGTH_FIELD_NUMBER = 1; + private long maxColLength_; + /** + * optional int64 max_col_length = 1; + */ + public boolean hasMaxColLength() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 max_col_length = 1; + */ + public long getMaxColLength() { + return maxColLength_; + } + + // optional double avg_col_length = 2; + public static final int 
AVG_COL_LENGTH_FIELD_NUMBER = 2; + private double avgColLength_; + /** + * optional double avg_col_length = 2; + */ + public boolean hasAvgColLength() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double avg_col_length = 2; + */ + public double getAvgColLength() { + return avgColLength_; + } + + private void initFields() { + maxColLength_ = 0L; + avgColLength_ = 0D; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, maxColLength_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeDouble(2, avgColLength_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, maxColLength_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(2, avgColLength_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return 
PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + maxColLength_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + avgColLength_ = 0D; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; + } + + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.maxColLength_ = maxColLength_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.avgColLength_ = avgColLength_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance()) return this; + if (other.hasMaxColLength()) { + setMaxColLength(other.getMaxColLength()); + } + if (other.hasAvgColLength()) { + setAvgColLength(other.getAvgColLength()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 max_col_length = 1; + private long maxColLength_ ; + /** + * optional int64 max_col_length = 1; + */ + public boolean hasMaxColLength() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 max_col_length = 1; + */ + public long getMaxColLength() { + return maxColLength_; + } + /** + * optional int64 max_col_length = 1; + */ + public Builder setMaxColLength(long value) { + bitField0_ |= 0x00000001; + maxColLength_ = value; + onChanged(); + return this; + } + /** + * optional int64 max_col_length = 1; + */ + public Builder clearMaxColLength() { + bitField0_ = (bitField0_ & ~0x00000001); + maxColLength_ = 0L; + onChanged(); + return this; + } + + // optional double 
avg_col_length = 2; + private double avgColLength_ ; + /** + * optional double avg_col_length = 2; + */ + public boolean hasAvgColLength() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double avg_col_length = 2; + */ + public double getAvgColLength() { + return avgColLength_; + } + /** + * optional double avg_col_length = 2; + */ + public Builder setAvgColLength(double value) { + bitField0_ |= 0x00000002; + avgColLength_ = value; + onChanged(); + return this; + } + /** + * optional double avg_col_length = 2; + */ + public Builder clearAvgColLength() { + bitField0_ = (bitField0_ & ~0x00000002); + avgColLength_ = 0D; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats) + } + + static { + defaultInstance = new StringStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats) + } + + public interface DecimalStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + boolean hasLowValue(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getLowValue(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getLowValueOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + boolean hasHighValue(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getHighValue(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getHighValueOrBuilder(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats} + */ + public static final class DecimalStats extends + com.google.protobuf.GeneratedMessage + implements DecimalStatsOrBuilder { + // Use DecimalStats.newBuilder() to construct. 
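(Illustrative note, not part of the patch: StringStats is complete at this point, so a hedged sketch of length-delimited serialization, relying only on the generated builder and parseDelimitedFrom(InputStream) shown above plus the protobuf 2.x base-class method writeDelimitedTo(), might be:)

    // Sketch only: write a ColumnStats.StringStats in length-delimited form and
    // read it back with the generated parseDelimitedFrom(InputStream).
    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats;

    public class StringStatsDelimited {
      public static void main(String[] args) throws Exception {
        StringStats stats = StringStats.newBuilder()
            .setMaxColLength(128L)     // optional int64 max_col_length = 1
            .setAvgColLength(17.5D)    // optional double avg_col_length = 2
            .build();

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        stats.writeDelimitedTo(out);   // varint length prefix + message bytes

        StringStats parsed =
            StringStats.parseDelimitedFrom(new ByteArrayInputStream(out.toByteArray()));
        assert parsed.getMaxColLength() == 128L;
      }
    }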
+ private DecimalStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private DecimalStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final DecimalStats defaultInstance; + public static DecimalStats getDefaultInstance() { + return defaultInstance; + } + + public DecimalStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DecimalStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = lowValue_.toBuilder(); + } + lowValue_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(lowValue_); + lowValue_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = highValue_.toBuilder(); + } + highValue_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(highValue_); + highValue_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new 
com.google.protobuf.AbstractParser() { + public DecimalStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DecimalStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface DecimalOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes unscaled = 1; + /** + * required bytes unscaled = 1; + */ + boolean hasUnscaled(); + /** + * required bytes unscaled = 1; + */ + com.google.protobuf.ByteString getUnscaled(); + + // required int32 scale = 2; + /** + * required int32 scale = 2; + */ + boolean hasScale(); + /** + * required int32 scale = 2; + */ + int getScale(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal} + */ + public static final class Decimal extends + com.google.protobuf.GeneratedMessage + implements DecimalOrBuilder { + // Use Decimal.newBuilder() to construct. + private Decimal(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Decimal(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Decimal defaultInstance; + public static Decimal getDefaultInstance() { + return defaultInstance; + } + + public Decimal getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Decimal( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + unscaled_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + scale_ = input.readInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Decimal parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Decimal(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bytes unscaled = 1; + public static final int UNSCALED_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString unscaled_; + /** + * required bytes unscaled = 1; + */ + public boolean hasUnscaled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes unscaled = 1; + */ + public com.google.protobuf.ByteString getUnscaled() { + return unscaled_; + } + + // required int32 scale = 2; + public static final int SCALE_FIELD_NUMBER = 2; + private int scale_; + /** + * required int32 scale = 2; + */ + public boolean hasScale() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int32 scale = 2; + */ + public int getScale() { + return scale_; + } + + private void initFields() { + unscaled_ = com.google.protobuf.ByteString.EMPTY; + scale_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUnscaled()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasScale()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, unscaled_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt32(2, scale_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, unscaled_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, scale_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + unscaled_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + scale_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.unscaled_ = unscaled_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.scale_ = scale_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance()) return this; + if (other.hasUnscaled()) { + setUnscaled(other.getUnscaled()); + } + if (other.hasScale()) { + setScale(other.getScale()); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUnscaled()) { + + return false; + } + if (!hasScale()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes unscaled = 1; + private com.google.protobuf.ByteString unscaled_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes unscaled = 1; + */ + public boolean hasUnscaled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes unscaled = 1; + */ + public com.google.protobuf.ByteString getUnscaled() { + return unscaled_; + } + /** + * required bytes unscaled = 1; + */ + public Builder setUnscaled(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + unscaled_ = value; + onChanged(); + return this; + } + /** + * required bytes unscaled = 1; + */ + public Builder clearUnscaled() { + bitField0_ = (bitField0_ & ~0x00000001); + unscaled_ = getDefaultInstance().getUnscaled(); + onChanged(); + return this; + } + + // required int32 scale = 2; + private int scale_ ; + /** + * required int32 scale = 2; + */ + public boolean hasScale() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int32 scale = 2; + */ + public int getScale() { + return scale_; + } + /** + * required int32 scale = 2; + */ + public Builder setScale(int value) { + bitField0_ |= 0x00000002; + scale_ = value; + onChanged(); + return this; + } + /** + * required int32 scale = 2; + */ + public Builder clearScale() { + bitField0_ = (bitField0_ & ~0x00000002); + scale_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal) + } + + static { + defaultInstance = new Decimal(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal) + } + + private int bitField0_; + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + public static final int LOW_VALUE_FIELD_NUMBER = 1; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal lowValue_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getLowValue() { + return lowValue_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value 
= 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getLowValueOrBuilder() { + return lowValue_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + public static final int HIGH_VALUE_FIELD_NUMBER = 2; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal highValue_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getHighValue() { + return highValue_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getHighValueOrBuilder() { + return highValue_; + } + + private void initFields() { + lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasLowValue()) { + if (!getLowValue().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasHighValue()) { + if (!getHighValue().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, highValue_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, highValue_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { 
+ return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getLowValueFieldBuilder(); + getHighValueFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (lowValueBuilder_ == null) { + lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + } else { + lowValueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (highValueBuilder_ == null) { + highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + } else { + highValueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (lowValueBuilder_ == null) { + result.lowValue_ = lowValue_; + } else { + result.lowValue_ = lowValueBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (highValueBuilder_ == null) { + result.highValue_ = highValue_; + } else { + result.highValue_ = highValueBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance()) return this; + if (other.hasLowValue()) { + mergeLowValue(other.getLowValue()); + } + if (other.hasHighValue()) { + mergeHighValue(other.getHighValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasLowValue()) { + if (!getLowValue().isInitialized()) { + + return false; + } + } + if (hasHighValue()) { + if (!getHighValue().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder> lowValueBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getLowValue() { + if (lowValueBuilder_ == null) { + return lowValue_; + } else { + return lowValueBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public Builder setLowValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal value) { + if (lowValueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + lowValue_ = value; + onChanged(); + } else { + lowValueBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public Builder setLowValue( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder builderForValue) { + if (lowValueBuilder_ == null) { + lowValue_ = builderForValue.build(); + onChanged(); + } else { + lowValueBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + 
return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public Builder mergeLowValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal value) { + if (lowValueBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + lowValue_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance()) { + lowValue_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder(lowValue_).mergeFrom(value).buildPartial(); + } else { + lowValue_ = value; + } + onChanged(); + } else { + lowValueBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public Builder clearLowValue() { + if (lowValueBuilder_ == null) { + lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + onChanged(); + } else { + lowValueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder getLowValueBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getLowValueFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getLowValueOrBuilder() { + if (lowValueBuilder_ != null) { + return lowValueBuilder_.getMessageOrBuilder(); + } else { + return lowValue_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder> + getLowValueFieldBuilder() { + if (lowValueBuilder_ == null) { + lowValueBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder>( + lowValue_, + getParentForChildren(), + isClean()); + lowValue_ = null; + } + return lowValueBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder> highValueBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getHighValue() { + if (highValueBuilder_ == null) { + return highValue_; + } else { + return highValueBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public Builder setHighValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal value) { + if (highValueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + highValue_ = value; + onChanged(); + } else { + highValueBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public Builder setHighValue( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder builderForValue) { + if (highValueBuilder_ == null) { + highValue_ = builderForValue.build(); + onChanged(); + } else { + highValueBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public Builder mergeHighValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal value) { + if (highValueBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + highValue_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance()) { + highValue_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder(highValue_).mergeFrom(value).buildPartial(); + } else { + highValue_ = value; + } + onChanged(); + } else { + highValueBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public Builder clearHighValue() { + if (highValueBuilder_ == null) { + highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + onChanged(); + } else { + highValueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder getHighValueBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getHighValueFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getHighValueOrBuilder() { + if (highValueBuilder_ != null) { + return highValueBuilder_.getMessageOrBuilder(); + } else { + return highValue_; + } + 
} + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder> + getHighValueFieldBuilder() { + if (highValueBuilder_ == null) { + highValueBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder>( + highValue_, + getParentForChildren(), + isClean()); + highValue_ = null; + } + return highValueBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats) + } + + static { + defaultInstance = new DecimalStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats) + } + + private int bitField0_; + // optional int64 last_analyzed = 1; + public static final int LAST_ANALYZED_FIELD_NUMBER = 1; + private long lastAnalyzed_; + /** + * optional int64 last_analyzed = 1; + */ + public boolean hasLastAnalyzed() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 last_analyzed = 1; + */ + public long getLastAnalyzed() { + return lastAnalyzed_; + } + + // required string column_type = 2; + public static final int COLUMN_TYPE_FIELD_NUMBER = 2; + private java.lang.Object columnType_; + /** + * required string column_type = 2; + */ + public boolean hasColumnType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string column_type = 2; + */ + public java.lang.String getColumnType() { + java.lang.Object ref = columnType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + columnType_ = s; + } + return s; + } + } + /** + * required string column_type = 2; + */ + public com.google.protobuf.ByteString + getColumnTypeBytes() { + java.lang.Object ref = columnType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + columnType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 num_nulls = 3; + public static final int NUM_NULLS_FIELD_NUMBER = 3; + private long numNulls_; + /** + * optional int64 num_nulls = 3; + */ + public boolean hasNumNulls() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 num_nulls = 3; + */ + public long getNumNulls() { + return numNulls_; + } + + // optional int64 num_distinct_values = 4; + public static final int NUM_DISTINCT_VALUES_FIELD_NUMBER = 4; + private long numDistinctValues_; + /** + * optional int64 num_distinct_values = 4; + */ + public boolean hasNumDistinctValues() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 num_distinct_values = 4; + */ + public long getNumDistinctValues() 
{ + return numDistinctValues_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + public static final int BOOL_STATS_FIELD_NUMBER = 5; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats boolStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public boolean hasBoolStats() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats getBoolStats() { + return boolStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder getBoolStatsOrBuilder() { + return boolStats_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + public static final int LONG_STATS_FIELD_NUMBER = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats longStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public boolean hasLongStats() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats getLongStats() { + return longStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder getLongStatsOrBuilder() { + return longStats_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + public static final int DOUBLE_STATS_FIELD_NUMBER = 7; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats doubleStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public boolean hasDoubleStats() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats getDoubleStats() { + return doubleStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder getDoubleStatsOrBuilder() { + return doubleStats_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + public static final int STRING_STATS_FIELD_NUMBER = 8; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats stringStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public boolean hasStringStats() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats 
getStringStats() { + return stringStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getStringStatsOrBuilder() { + return stringStats_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + public static final int BINARY_STATS_FIELD_NUMBER = 9; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats binaryStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public boolean hasBinaryStats() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getBinaryStats() { + return binaryStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getBinaryStatsOrBuilder() { + return binaryStats_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + public static final int DECIMAL_STATS_FIELD_NUMBER = 10; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats decimalStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public boolean hasDecimalStats() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats getDecimalStats() { + return decimalStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder getDecimalStatsOrBuilder() { + return decimalStats_; + } + + private void initFields() { + lastAnalyzed_ = 0L; + columnType_ = ""; + numNulls_ = 0L; + numDistinctValues_ = 0L; + boolStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); + longStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); + doubleStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); + stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasColumnType()) { + memoizedIsInitialized = 0; + return false; + } + if (hasDecimalStats()) { + if (!getDecimalStats().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + 
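/*
 * Illustrative usage sketch -- not part of the generated patch above. It shows how a
 * caller (for example the HBase serialization helpers in this metastore code) might
 * round-trip a decimal column's statistics through the generated ColumnStats message.
 * The isInitialized() logic just above means build() throws unless the required
 * column_type field is set, and DecimalStats.Decimal additionally requires its
 * unscaled/scale fields. The example class name, the "decimal(10,2)" type string and
 * the sample values are assumptions made only for illustration; the builder and
 * parseFrom calls are the ones defined by the generated code in this diff.
 */
import java.math.BigDecimal;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats;

public class ColumnStatsProtoExample {
  // Convert a Java BigDecimal into the nested Decimal message (unscaled bytes + scale).
  static ColumnStats.DecimalStats.Decimal toProto(BigDecimal d) {
    return ColumnStats.DecimalStats.Decimal.newBuilder()
        .setUnscaled(ByteString.copyFrom(d.unscaledValue().toByteArray()))
        .setScale(d.scale())
        .build();
  }

  public static void main(String[] args) throws InvalidProtocolBufferException {
    ColumnStats stats = ColumnStats.newBuilder()
        .setColumnType("decimal(10,2)")   // required field; build() would throw if omitted
        .setNumNulls(4L)
        .setNumDistinctValues(120L)
        .setDecimalStats(ColumnStats.DecimalStats.newBuilder()
            .setLowValue(toProto(new BigDecimal("0.01")))
            .setHighValue(toProto(new BigDecimal("999.99"))))
        .build();

    // Serialize as it would be stored in an HBase cell, then parse it back.
    ByteString bytes = stats.toByteString();
    ColumnStats roundTripped = ColumnStats.parseFrom(bytes);
    System.out.println(roundTripped.getDecimalStats().getHighValue().getScale()); // prints 2
  }
}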
public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, lastAnalyzed_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getColumnTypeBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, numNulls_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt64(4, numDistinctValues_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(5, boolStats_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeMessage(6, longStats_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeMessage(7, doubleStats_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeMessage(8, stringStats_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeMessage(9, binaryStats_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeMessage(10, decimalStats_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, lastAnalyzed_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getColumnTypeBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, numNulls_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, numDistinctValues_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, boolStats_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, longStats_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, doubleStats_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, stringStats_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, binaryStats_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(10, decimalStats_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder.class); + } + + // Construct 
using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBoolStatsFieldBuilder(); + getLongStatsFieldBuilder(); + getDoubleStatsFieldBuilder(); + getStringStatsFieldBuilder(); + getBinaryStatsFieldBuilder(); + getDecimalStatsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + lastAnalyzed_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + columnType_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + numNulls_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + numDistinctValues_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + if (boolStatsBuilder_ == null) { + boolStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); + } else { + boolStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + if (longStatsBuilder_ == null) { + longStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); + } else { + longStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + if (doubleStatsBuilder_ == null) { + doubleStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); + } else { + doubleStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + if (stringStatsBuilder_ == null) { + stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + } else { + stringStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000080); + if (binaryStatsBuilder_ == null) { + binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + } else { + binaryStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + if (decimalStatsBuilder_ == null) { + decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); + } else { + decimalStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats result = new 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.lastAnalyzed_ = lastAnalyzed_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.columnType_ = columnType_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.numNulls_ = numNulls_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.numDistinctValues_ = numDistinctValues_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + if (boolStatsBuilder_ == null) { + result.boolStats_ = boolStats_; + } else { + result.boolStats_ = boolStatsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + if (longStatsBuilder_ == null) { + result.longStats_ = longStats_; + } else { + result.longStats_ = longStatsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + if (doubleStatsBuilder_ == null) { + result.doubleStats_ = doubleStats_; + } else { + result.doubleStats_ = doubleStatsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000080; + } + if (stringStatsBuilder_ == null) { + result.stringStats_ = stringStats_; + } else { + result.stringStats_ = stringStatsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000100; + } + if (binaryStatsBuilder_ == null) { + result.binaryStats_ = binaryStats_; + } else { + result.binaryStats_ = binaryStatsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000200; + } + if (decimalStatsBuilder_ == null) { + result.decimalStats_ = decimalStats_; + } else { + result.decimalStats_ = decimalStatsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance()) return this; + if (other.hasLastAnalyzed()) { + setLastAnalyzed(other.getLastAnalyzed()); + } + if (other.hasColumnType()) { + bitField0_ |= 0x00000002; + columnType_ = other.columnType_; + onChanged(); + } + if (other.hasNumNulls()) { + setNumNulls(other.getNumNulls()); + } + if (other.hasNumDistinctValues()) { + setNumDistinctValues(other.getNumDistinctValues()); + } + if (other.hasBoolStats()) { + mergeBoolStats(other.getBoolStats()); + } + if (other.hasLongStats()) { + mergeLongStats(other.getLongStats()); + } + if (other.hasDoubleStats()) { + mergeDoubleStats(other.getDoubleStats()); + } + if (other.hasStringStats()) { + mergeStringStats(other.getStringStats()); + } + if (other.hasBinaryStats()) { + mergeBinaryStats(other.getBinaryStats()); + } + if (other.hasDecimalStats()) { + mergeDecimalStats(other.getDecimalStats()); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasColumnType()) { + + return false; + } + if (hasDecimalStats()) { + if (!getDecimalStats().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 last_analyzed = 1; + private long lastAnalyzed_ ; + /** + * optional int64 last_analyzed = 1; + */ + public boolean hasLastAnalyzed() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 last_analyzed = 1; + */ + public long getLastAnalyzed() { + return lastAnalyzed_; + } + /** + * optional int64 last_analyzed = 1; + */ + public Builder setLastAnalyzed(long value) { + bitField0_ |= 0x00000001; + lastAnalyzed_ = value; + onChanged(); + return this; + } + /** + * optional int64 last_analyzed = 1; + */ + public Builder clearLastAnalyzed() { + bitField0_ = (bitField0_ & ~0x00000001); + lastAnalyzed_ = 0L; + onChanged(); + return this; + } + + // required string column_type = 2; + private java.lang.Object columnType_ = ""; + /** + * required string column_type = 2; + */ + public boolean hasColumnType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string column_type = 2; + */ + public java.lang.String getColumnType() { + java.lang.Object ref = columnType_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + columnType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string column_type = 2; + */ + public com.google.protobuf.ByteString + getColumnTypeBytes() { + java.lang.Object ref = columnType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + columnType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string column_type = 2; + */ + public Builder setColumnType( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + columnType_ = value; + onChanged(); + return this; + } + /** + * required string column_type = 2; + */ + public Builder clearColumnType() { + bitField0_ = (bitField0_ & ~0x00000002); + columnType_ = getDefaultInstance().getColumnType(); + onChanged(); + return this; + } + /** + * required string column_type = 2; + */ + public Builder setColumnTypeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + columnType_ = value; + onChanged(); + return this; + } + + // optional int64 num_nulls = 3; + private long numNulls_ ; + /** + * optional int64 num_nulls = 3; + */ + public boolean hasNumNulls() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional 
int64 num_nulls = 3; + */ + public long getNumNulls() { + return numNulls_; + } + /** + * optional int64 num_nulls = 3; + */ + public Builder setNumNulls(long value) { + bitField0_ |= 0x00000004; + numNulls_ = value; + onChanged(); + return this; + } + /** + * optional int64 num_nulls = 3; + */ + public Builder clearNumNulls() { + bitField0_ = (bitField0_ & ~0x00000004); + numNulls_ = 0L; + onChanged(); + return this; + } + + // optional int64 num_distinct_values = 4; + private long numDistinctValues_ ; + /** + * optional int64 num_distinct_values = 4; + */ + public boolean hasNumDistinctValues() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 num_distinct_values = 4; + */ + public long getNumDistinctValues() { + return numDistinctValues_; + } + /** + * optional int64 num_distinct_values = 4; + */ + public Builder setNumDistinctValues(long value) { + bitField0_ |= 0x00000008; + numDistinctValues_ = value; + onChanged(); + return this; + } + /** + * optional int64 num_distinct_values = 4; + */ + public Builder clearNumDistinctValues() { + bitField0_ = (bitField0_ & ~0x00000008); + numDistinctValues_ = 0L; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats boolStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder> boolStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public boolean hasBoolStats() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats getBoolStats() { + if (boolStatsBuilder_ == null) { + return boolStats_; + } else { + return boolStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public Builder setBoolStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats value) { + if (boolStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + boolStats_ = value; + onChanged(); + } else { + boolStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public Builder setBoolStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder builderForValue) { + if (boolStatsBuilder_ == null) { + boolStats_ = builderForValue.build(); + onChanged(); + } else { + boolStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public Builder mergeBoolStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats value) { + if (boolStatsBuilder_ == null) 
{ + if (((bitField0_ & 0x00000010) == 0x00000010) && + boolStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance()) { + boolStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder(boolStats_).mergeFrom(value).buildPartial(); + } else { + boolStats_ = value; + } + onChanged(); + } else { + boolStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public Builder clearBoolStats() { + if (boolStatsBuilder_ == null) { + boolStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); + onChanged(); + } else { + boolStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder getBoolStatsBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getBoolStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder getBoolStatsOrBuilder() { + if (boolStatsBuilder_ != null) { + return boolStatsBuilder_.getMessageOrBuilder(); + } else { + return boolStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder> + getBoolStatsFieldBuilder() { + if (boolStatsBuilder_ == null) { + boolStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder>( + boolStats_, + getParentForChildren(), + isClean()); + boolStats_ = null; + } + return boolStatsBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats longStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder> longStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public boolean hasLongStats() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats 
getLongStats() { + if (longStatsBuilder_ == null) { + return longStats_; + } else { + return longStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public Builder setLongStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats value) { + if (longStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + longStats_ = value; + onChanged(); + } else { + longStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public Builder setLongStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder builderForValue) { + if (longStatsBuilder_ == null) { + longStats_ = builderForValue.build(); + onChanged(); + } else { + longStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public Builder mergeLongStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats value) { + if (longStatsBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020) && + longStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance()) { + longStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.newBuilder(longStats_).mergeFrom(value).buildPartial(); + } else { + longStats_ = value; + } + onChanged(); + } else { + longStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public Builder clearLongStats() { + if (longStatsBuilder_ == null) { + longStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); + onChanged(); + } else { + longStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder getLongStatsBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return getLongStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder getLongStatsOrBuilder() { + if (longStatsBuilder_ != null) { + return longStatsBuilder_.getMessageOrBuilder(); + } else { + return longStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder> + getLongStatsFieldBuilder() { + if (longStatsBuilder_ == null) { + longStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder>( + longStats_, + getParentForChildren(), + isClean()); + longStats_ = null; + } + return longStatsBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats doubleStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder> doubleStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public boolean hasDoubleStats() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats getDoubleStats() { + if (doubleStatsBuilder_ == null) { + return doubleStats_; + } else { + return doubleStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public Builder setDoubleStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats value) { + if (doubleStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + doubleStats_ = value; + onChanged(); + } else { + doubleStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public Builder setDoubleStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder builderForValue) { + if (doubleStatsBuilder_ == null) { + doubleStats_ = builderForValue.build(); + onChanged(); + } else { + doubleStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public Builder mergeDoubleStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats value) { + if (doubleStatsBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040) && + doubleStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance()) { + doubleStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder(doubleStats_).mergeFrom(value).buildPartial(); + } else { + doubleStats_ = value; + } + onChanged(); + } else { + doubleStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000040; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public Builder clearDoubleStats() { + if (doubleStatsBuilder_ == null) { + doubleStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); + onChanged(); + } else { + doubleStatsBuilder_.clear(); + } + 
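// (Readability note, not protoc output.) Every clearXxxStats() in this builder follows
// the same two-step pattern shown here: reset the field, or its nested builder, to the
// default instance above, then drop the matching presence bit below so the
// corresponding hasXxxStats() accessor reports false again.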
bitField0_ = (bitField0_ & ~0x00000040); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder getDoubleStatsBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return getDoubleStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder getDoubleStatsOrBuilder() { + if (doubleStatsBuilder_ != null) { + return doubleStatsBuilder_.getMessageOrBuilder(); + } else { + return doubleStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder> + getDoubleStatsFieldBuilder() { + if (doubleStatsBuilder_ == null) { + doubleStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder>( + doubleStats_, + getParentForChildren(), + isClean()); + doubleStats_ = null; + } + return doubleStatsBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder> stringStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public boolean hasStringStats() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getStringStats() { + if (stringStatsBuilder_ == null) { + return stringStats_; + } else { + return stringStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public Builder setStringStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats value) { + if (stringStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stringStats_ = value; + onChanged(); + } else { + stringStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public Builder setStringStats( + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder builderForValue) { + if (stringStatsBuilder_ == null) { + stringStats_ = builderForValue.build(); + onChanged(); + } else { + stringStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public Builder mergeStringStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats value) { + if (stringStatsBuilder_ == null) { + if (((bitField0_ & 0x00000080) == 0x00000080) && + stringStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance()) { + stringStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.newBuilder(stringStats_).mergeFrom(value).buildPartial(); + } else { + stringStats_ = value; + } + onChanged(); + } else { + stringStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000080; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public Builder clearStringStats() { + if (stringStatsBuilder_ == null) { + stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + onChanged(); + } else { + stringStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000080); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder getStringStatsBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return getStringStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getStringStatsOrBuilder() { + if (stringStatsBuilder_ != null) { + return stringStatsBuilder_.getMessageOrBuilder(); + } else { + return stringStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder> + getStringStatsFieldBuilder() { + if (stringStatsBuilder_ == null) { + stringStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder>( + stringStats_, + getParentForChildren(), + isClean()); + stringStats_ = null; + } + return stringStatsBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder> binaryStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public boolean hasBinaryStats() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getBinaryStats() { + if (binaryStatsBuilder_ == null) { + return binaryStats_; + } else { + return binaryStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public Builder setBinaryStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats value) { + if (binaryStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + binaryStats_ = value; + onChanged(); + } else { + binaryStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public Builder setBinaryStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder builderForValue) { + if (binaryStatsBuilder_ == null) { + binaryStats_ = builderForValue.build(); + onChanged(); + } else { + binaryStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public Builder mergeBinaryStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats value) { + if (binaryStatsBuilder_ == null) { + if (((bitField0_ & 0x00000100) == 0x00000100) && + binaryStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance()) { + binaryStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.newBuilder(binaryStats_).mergeFrom(value).buildPartial(); + } else { + binaryStats_ = value; + } + onChanged(); + } else { + binaryStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public Builder clearBinaryStats() { + if (binaryStatsBuilder_ == null) { + binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + onChanged(); + } else { + binaryStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder getBinaryStatsBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return getBinaryStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getBinaryStatsOrBuilder() { + if (binaryStatsBuilder_ != null) { 
+ return binaryStatsBuilder_.getMessageOrBuilder(); + } else { + return binaryStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder> + getBinaryStatsFieldBuilder() { + if (binaryStatsBuilder_ == null) { + binaryStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder>( + binaryStats_, + getParentForChildren(), + isClean()); + binaryStats_ = null; + } + return binaryStatsBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder> decimalStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public boolean hasDecimalStats() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats getDecimalStats() { + if (decimalStatsBuilder_ == null) { + return decimalStats_; + } else { + return decimalStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public Builder setDecimalStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats value) { + if (decimalStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + decimalStats_ = value; + onChanged(); + } else { + decimalStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public Builder setDecimalStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder builderForValue) { + if (decimalStatsBuilder_ == null) { + decimalStats_ = builderForValue.build(); + onChanged(); + } else { + decimalStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public Builder mergeDecimalStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats value) { + if (decimalStatsBuilder_ == null) { + if (((bitField0_ & 0x00000200) == 0x00000200) && + 
decimalStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance()) { + decimalStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder(decimalStats_).mergeFrom(value).buildPartial(); + } else { + decimalStats_ = value; + } + onChanged(); + } else { + decimalStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public Builder clearDecimalStats() { + if (decimalStatsBuilder_ == null) { + decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); + onChanged(); + } else { + decimalStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder getDecimalStatsBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return getDecimalStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder getDecimalStatsOrBuilder() { + if (decimalStatsBuilder_ != null) { + return decimalStatsBuilder_.getMessageOrBuilder(); + } else { + return decimalStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder> + getDecimalStatsFieldBuilder() { + if (decimalStatsBuilder_ == null) { + decimalStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder>( + decimalStats_, + getParentForChildren(), + isClean()); + decimalStats_ = null; + } + return decimalStatsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats) + } + + static { + defaultInstance = new ColumnStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats) + } + + public interface DatabaseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string description = 1; + /** + * optional string description = 1; + */ + boolean hasDescription(); + /** + * optional string description = 1; + */ + java.lang.String getDescription(); + /** + * optional string description = 1; + */ + com.google.protobuf.ByteString + getDescriptionBytes(); + + // optional string uri = 2; + /** + * optional string uri = 2; + */ + boolean hasUri(); + /** + * optional string uri = 2; + */ + java.lang.String getUri(); + /** + * optional string uri = 2; + */ + com.google.protobuf.ByteString + getUriBytes(); + + // optional 
.org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + boolean hasParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + boolean hasPrivileges(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder(); + + // optional string owner_name = 5; + /** + * optional string owner_name = 5; + */ + boolean hasOwnerName(); + /** + * optional string owner_name = 5; + */ + java.lang.String getOwnerName(); + /** + * optional string owner_name = 5; + */ + com.google.protobuf.ByteString + getOwnerNameBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + boolean hasOwnerType(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Database} + */ + public static final class Database extends + com.google.protobuf.GeneratedMessage + implements DatabaseOrBuilder { + // Use Database.newBuilder() to construct. 
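// Illustrative usage sketch (added for readability; not part of the protoc-generated
// source). It shows how caller code could assemble and round-trip this message with
// the builder API defined below. The literal values and local variable names are
// invented for the example; the setters, build(), parseFrom(byte[]) and the
// PrincipalType.USER default all appear in this generated class.
//
//   HbaseMetastoreProto.Database db = HbaseMetastoreProto.Database.newBuilder()
//       .setDescription("Default Hive database")
//       .setUri("hdfs://nn:8020/user/hive/warehouse")
//       .setOwnerName("hive")
//       .setOwnerType(HbaseMetastoreProto.PrincipalType.USER)
//       .build();
//   byte[] bytes = db.toByteArray();                      // standard protobuf serialization
//   HbaseMetastoreProto.Database copy =
//       HbaseMetastoreProto.Database.parseFrom(bytes);    // throws InvalidProtocolBufferException on bad input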
+ private Database(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Database(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Database defaultInstance; + public static Database getDefaultInstance() { + return defaultInstance; + } + + public Database getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Database( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + description_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + uri_ = input.readBytes(); + break; + } + case 26: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = parameters_.toBuilder(); + } + parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(parameters_); + parameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 34: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = privileges_.toBuilder(); + } + privileges_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(privileges_); + privileges_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + case 42: { + bitField0_ |= 0x00000010; + ownerName_ = input.readBytes(); + break; + } + case 48: { + int rawValue = input.readEnum(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(6, rawValue); + } else { + bitField0_ |= 0x00000020; + ownerType_ = value; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Database parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Database(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string description = 1; + public static final int DESCRIPTION_FIELD_NUMBER = 1; + private java.lang.Object description_; + /** + * optional string description = 1; + */ + public boolean hasDescription() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string description = 1; + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + description_ = s; + } + return s; + } + } + /** + * optional string description = 1; + */ + public com.google.protobuf.ByteString + getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string uri = 2; + public static final int URI_FIELD_NUMBER = 2; + private java.lang.Object uri_; + /** + * optional string uri = 2; + */ + public boolean hasUri() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string uri = 2; + */ + public java.lang.String getUri() { + java.lang.Object ref = uri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + uri_ = s; + } + return s; + } + } + /** + * optional string uri = 2; + */ + public com.google.protobuf.ByteString + getUriBytes() { + java.lang.Object ref = uri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + uri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + public static final int PARAMETERS_FIELD_NUMBER = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + return parameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + return parameters_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + public static final int PRIVILEGES_FIELD_NUMBER = 4; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public boolean hasPrivileges() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { + return privileges_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { + return privileges_; + } + + // optional string owner_name = 5; + public static final int OWNER_NAME_FIELD_NUMBER = 5; + private java.lang.Object ownerName_; + /** + * optional string owner_name = 5; + */ + public boolean hasOwnerName() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional string owner_name = 5; + */ + public java.lang.String getOwnerName() { + java.lang.Object ref = ownerName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + ownerName_ = s; + } + return s; + } + } + /** + * optional string owner_name = 5; + */ + public com.google.protobuf.ByteString + getOwnerNameBytes() { + java.lang.Object ref = ownerName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ownerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + public static final int OWNER_TYPE_FIELD_NUMBER = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType ownerType_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + public boolean hasOwnerType() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType() { + return ownerType_; + } + + private void initFields() { + description_ = ""; + uri_ = ""; + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + ownerName_ = ""; + ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) 
return isInitialized == 1; + + if (hasParameters()) { + if (!getParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasPrivileges()) { + if (!getPrivileges().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getDescriptionBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getUriBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, parameters_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, privileges_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, getOwnerNameBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeEnum(6, ownerType_.getNumber()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getDescriptionBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getUriBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, parameters_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, privileges_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getOwnerNameBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(6, ownerType_.getNumber()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Database} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DatabaseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getParametersFieldBuilder(); + getPrivilegesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + description_ = ""; + bitField0_ = (bitField0_ & 
~0x00000001); + uri_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + if (privilegesBuilder_ == null) { + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + } else { + privilegesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + ownerName_ = ""; + bitField0_ = (bitField0_ & ~0x00000010); + ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.description_ = description_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.uri_ = uri_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (parametersBuilder_ == null) { + result.parameters_ = parameters_; + } else { + result.parameters_ = parametersBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (privilegesBuilder_ == null) { + result.privileges_ = privileges_; + } else { + result.privileges_ = privilegesBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.ownerName_ = ownerName_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.ownerType_ = ownerType_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.getDefaultInstance()) return this; + if (other.hasDescription()) { + bitField0_ |= 0x00000001; + description_ = other.description_; + 
onChanged(); + } + if (other.hasUri()) { + bitField0_ |= 0x00000002; + uri_ = other.uri_; + onChanged(); + } + if (other.hasParameters()) { + mergeParameters(other.getParameters()); + } + if (other.hasPrivileges()) { + mergePrivileges(other.getPrivileges()); + } + if (other.hasOwnerName()) { + bitField0_ |= 0x00000010; + ownerName_ = other.ownerName_; + onChanged(); + } + if (other.hasOwnerType()) { + setOwnerType(other.getOwnerType()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasParameters()) { + if (!getParameters().isInitialized()) { + + return false; + } + } + if (hasPrivileges()) { + if (!getPrivileges().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string description = 1; + private java.lang.Object description_ = ""; + /** + * optional string description = 1; + */ + public boolean hasDescription() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string description = 1; + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + description_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string description = 1; + */ + public com.google.protobuf.ByteString + getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string description = 1; + */ + public Builder setDescription( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + description_ = value; + onChanged(); + return this; + } + /** + * optional string description = 1; + */ + public Builder clearDescription() { + bitField0_ = (bitField0_ & ~0x00000001); + description_ = getDefaultInstance().getDescription(); + onChanged(); + return this; + } + /** + * optional string description = 1; + */ + public Builder setDescriptionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + description_ = value; + onChanged(); + return this; + } + + // optional string uri = 2; + private java.lang.Object uri_ = ""; + /** + * optional string uri = 2; + */ + public boolean hasUri() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string uri = 2; + */ + public java.lang.String getUri() { + java.lang.Object ref = uri_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + 
.toStringUtf8(); + uri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string uri = 2; + */ + public com.google.protobuf.ByteString + getUriBytes() { + java.lang.Object ref = uri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + uri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string uri = 2; + */ + public Builder setUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + uri_ = value; + onChanged(); + return this; + } + /** + * optional string uri = 2; + */ + public Builder clearUri() { + bitField0_ = (bitField0_ & ~0x00000002); + uri_ = getDefaultInstance().getUri(); + onChanged(); + return this; + } + /** + * optional string uri = 2; + */ + public Builder setUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + uri_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + if (parametersBuilder_ == null) { + return parameters_; + } else { + return parametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + parameters_ = value; + onChanged(); + } else { + parametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder setParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (parametersBuilder_ == null) { + parameters_ = builderForValue.build(); + onChanged(); + } else { + parametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + parameters_ = + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); + } else { + parameters_ = value; + } + onChanged(); + } else { + parametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder clearParameters() { + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + if (parametersBuilder_ != null) { + return parametersBuilder_.getMessageOrBuilder(); + } else { + return parameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getParametersFieldBuilder() { + if (parametersBuilder_ == null) { + parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + parameters_, + getParentForChildren(), + isClean()); + parameters_ = null; + } + return parametersBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> privilegesBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public boolean hasPrivileges() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { + if (privilegesBuilder_ == null) { + return privileges_; + } else { + return privilegesBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public Builder setPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { + if 
(privilegesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + privileges_ = value; + onChanged(); + } else { + privilegesBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public Builder setPrivileges( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder builderForValue) { + if (privilegesBuilder_ == null) { + privileges_ = builderForValue.build(); + onChanged(); + } else { + privilegesBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public Builder mergePrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { + if (privilegesBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + privileges_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) { + privileges_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(privileges_).mergeFrom(value).buildPartial(); + } else { + privileges_ = value; + } + onChanged(); + } else { + privilegesBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public Builder clearPrivileges() { + if (privilegesBuilder_ == null) { + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + onChanged(); + } else { + privilegesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder getPrivilegesBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getPrivilegesFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { + if (privilegesBuilder_ != null) { + return privilegesBuilder_.getMessageOrBuilder(); + } else { + return privileges_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> + getPrivilegesFieldBuilder() { + if (privilegesBuilder_ == null) { + privilegesBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder>( + privileges_, + getParentForChildren(), + isClean()); + privileges_ = null; + } + return privilegesBuilder_; + } + + // optional string owner_name = 5; + private java.lang.Object ownerName_ = ""; + /** + * 
optional string owner_name = 5; + */ + public boolean hasOwnerName() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional string owner_name = 5; + */ + public java.lang.String getOwnerName() { + java.lang.Object ref = ownerName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + ownerName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string owner_name = 5; + */ + public com.google.protobuf.ByteString + getOwnerNameBytes() { + java.lang.Object ref = ownerName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ownerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string owner_name = 5; + */ + public Builder setOwnerName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + ownerName_ = value; + onChanged(); + return this; + } + /** + * optional string owner_name = 5; + */ + public Builder clearOwnerName() { + bitField0_ = (bitField0_ & ~0x00000010); + ownerName_ = getDefaultInstance().getOwnerName(); + onChanged(); + return this; + } + /** + * optional string owner_name = 5; + */ + public Builder setOwnerNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + ownerName_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + public boolean hasOwnerType() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType() { + return ownerType_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + public Builder setOwnerType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + ownerType_ = value; + onChanged(); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + public Builder clearOwnerType() { + bitField0_ = (bitField0_ & ~0x00000020); + ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Database) + } + + static { + defaultInstance = new Database(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Database) + } + + public interface FieldSchemaOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + /** + * required string name = 1; + */ + boolean hasName(); + /** + * required string name = 1; + */ + java.lang.String getName(); + /** + * required string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + // required 
string type = 2; + /** + * required string type = 2; + */ + boolean hasType(); + /** + * required string type = 2; + */ + java.lang.String getType(); + /** + * required string type = 2; + */ + com.google.protobuf.ByteString + getTypeBytes(); + + // optional string comment = 3; + /** + * optional string comment = 3; + */ + boolean hasComment(); + /** + * optional string comment = 3; + */ + java.lang.String getComment(); + /** + * optional string comment = 3; + */ + com.google.protobuf.ByteString + getCommentBytes(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.FieldSchema} + */ + public static final class FieldSchema extends + com.google.protobuf.GeneratedMessage + implements FieldSchemaOrBuilder { + // Use FieldSchema.newBuilder() to construct. + private FieldSchema(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private FieldSchema(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final FieldSchema defaultInstance; + public static FieldSchema getDefaultInstance() { + return defaultInstance; + } + + public FieldSchema getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private FieldSchema( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + type_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + comment_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public FieldSchema parsePartialFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FieldSchema(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string type = 2; + public static final int TYPE_FIELD_NUMBER = 2; + private java.lang.Object type_; + /** + * required string type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string type = 2; + */ + public java.lang.String getType() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + type_ = s; + } + return s; + } + } + /** + * required string type = 2; + */ + public com.google.protobuf.ByteString + getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string comment = 3; + public static final int COMMENT_FIELD_NUMBER = 3; + private java.lang.Object comment_; + /** + * optional string comment = 3; + */ + public boolean hasComment() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string comment = 3; + */ + public java.lang.String getComment() { + java.lang.Object ref = comment_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + comment_ = s; + } + return s; + } + } + /** + * optional string comment = 3; + */ + public com.google.protobuf.ByteString + getCommentBytes() { + java.lang.Object ref = comment_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + comment_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + name_ = ""; + type_ = ""; + comment_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + 
byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getTypeBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getCommentBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getTypeBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getCommentBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.FieldSchema} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + type_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + comment_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema build() { + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.comment_ = comment_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (other.hasType()) { + bitField0_ |= 0x00000002; + type_ = other.type_; + onChanged(); + } + if (other.hasComment()) { + bitField0_ |= 0x00000004; + comment_ = other.comment_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + if (!hasType()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + 
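        /* Editor's note, not part of the generated file: FieldSchema holds the same
         * name/type/comment triple as the metastore's Thrift FieldSchema, so a column
         * definition can round-trip through this builder. A minimal sketch (the
         * literal values here are invented for illustration):
         *
         *   HbaseMetastoreProto.FieldSchema col = HbaseMetastoreProto.FieldSchema.newBuilder()
         *       .setName("id")
         *       .setType("int")
         *       .setComment("surrogate key")
         *       .build();
         *   byte[] serialized = col.toByteArray();
         *   HbaseMetastoreProto.FieldSchema copy =
         *       HbaseMetastoreProto.FieldSchema.parseFrom(serialized);
         *
         * build() throws UninitializedMessageException if the required name or type
         * field is unset, matching the isInitialized() checks generated above.
         */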
/** + * required string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + // required string type = 2; + private java.lang.Object type_ = ""; + /** + * required string type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string type = 2; + */ + public java.lang.String getType() { + java.lang.Object ref = type_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + type_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string type = 2; + */ + public com.google.protobuf.ByteString + getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string type = 2; + */ + public Builder setType( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value; + onChanged(); + return this; + } + /** + * required string type = 2; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = getDefaultInstance().getType(); + onChanged(); + return this; + } + /** + * required string type = 2; + */ + public Builder setTypeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value; + onChanged(); + return this; + } + + // optional string comment = 3; + private java.lang.Object comment_ = ""; + /** + * optional string comment = 3; + */ + public boolean hasComment() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string comment = 3; + */ + public java.lang.String getComment() { + java.lang.Object ref = comment_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + comment_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string comment = 3; + */ + public com.google.protobuf.ByteString + getCommentBytes() { + java.lang.Object ref = comment_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + comment_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string comment = 3; + */ + public Builder setComment( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + comment_ = value; + onChanged(); + return this; + } + /** + * optional string comment = 3; + */ + public Builder clearComment() { + bitField0_ = (bitField0_ & ~0x00000004); + comment_ = 
getDefaultInstance().getComment(); + onChanged(); + return this; + } + /** + * optional string comment = 3; + */ + public Builder setCommentBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + comment_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.FieldSchema) + } + + static { + defaultInstance = new FieldSchema(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.FieldSchema) + } + + public interface ParameterEntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string key = 1; + /** + * required string key = 1; + */ + boolean hasKey(); + /** + * required string key = 1; + */ + java.lang.String getKey(); + /** + * required string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + + // required string value = 2; + /** + * required string value = 2; + */ + boolean hasValue(); + /** + * required string value = 2; + */ + java.lang.String getValue(); + /** + * required string value = 2; + */ + com.google.protobuf.ByteString + getValueBytes(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ParameterEntry} + */ + public static final class ParameterEntry extends + com.google.protobuf.GeneratedMessage + implements ParameterEntryOrBuilder { + // Use ParameterEntry.newBuilder() to construct. + private ParameterEntry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ParameterEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ParameterEntry defaultInstance; + public static ParameterEntry getDefaultInstance() { + return defaultInstance; + } + + public ParameterEntry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ParameterEntry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + key_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + value_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ParameterEntry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ParameterEntry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private java.lang.Object value_; + /** + * required string value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string value = 2; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + value_ = s; + } + return s; + } + } + /** + * required string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + key_ = ""; + value_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + 
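      /* Editor's note, not part of the generated file: ParameterEntry is a plain
       * key/value pair. The protobuf version in use here has no native map type, so
       * the metastore's Map<String, String> parameters are presumably encoded as a
       * repeated list of these entries via the Parameters message defined further
       * down. Both fields are required, which is why isInitialized() above rejects
       * an entry that is missing either its key or its value.
       */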
+ public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getValueBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getValueBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ParameterEntry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + value_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + 
if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); + } + if (other.hasValue()) { + bitField0_ |= 0x00000002; + value_ = other.value_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasValue()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string key = 1; + private java.lang.Object key_ = ""; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + // required string value = 2; + private java.lang.Object value_ = ""; + 
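        /* Editor's note, not part of the generated file: a minimal sketch of packing a
         * parameters map with this entry builder and the Parameters message defined
         * below; the key and value strings are invented for illustration:
         *
         *   HbaseMetastoreProto.Parameters params = HbaseMetastoreProto.Parameters.newBuilder()
         *       .addParameter(HbaseMetastoreProto.ParameterEntry.newBuilder()
         *           .setKey("comment")
         *           .setValue("imported by HBaseImport"))
         *       .build();
         *
         * addParameter(Builder) invokes build() on the nested builder, so an entry
         * missing its required key or value fails at that point rather than at
         * serialization time.
         */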
/** + * required string value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string value = 2; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + value_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string value = 2; + */ + public Builder setValue( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + /** + * required string value = 2; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = getDefaultInstance().getValue(); + onChanged(); + return this; + } + /** + * required string value = 2; + */ + public Builder setValueBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ParameterEntry) + } + + static { + defaultInstance = new ParameterEntry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ParameterEntry) + } + + public interface ParametersOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + java.util.List + getParameterList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry getParameter(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + int getParameterCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + java.util.List + getParameterOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder getParameterOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Parameters} + */ + public static final class Parameters extends + com.google.protobuf.GeneratedMessage + implements ParametersOrBuilder { + // Use Parameters.newBuilder() to construct. 
+ private Parameters(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Parameters(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Parameters defaultInstance; + public static Parameters getDefaultInstance() { + return defaultInstance; + } + + public Parameters getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Parameters( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + parameter_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + parameter_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + parameter_ = java.util.Collections.unmodifiableList(parameter_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Parameters parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Parameters(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + public static final int PARAMETER_FIELD_NUMBER = 1; + private java.util.List parameter_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public java.util.List 
getParameterList() { + return parameter_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public java.util.List + getParameterOrBuilderList() { + return parameter_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public int getParameterCount() { + return parameter_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry getParameter(int index) { + return parameter_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder getParameterOrBuilder( + int index) { + return parameter_.get(index); + } + + private void initFields() { + parameter_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getParameterCount(); i++) { + if (!getParameter(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < parameter_.size(); i++) { + output.writeMessage(1, parameter_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < parameter_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, parameter_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters 
parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Parameters} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getParameterFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (parameterBuilder_ == null) { + parameter_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + parameterBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return 
create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters(this); + int from_bitField0_ = bitField0_; + if (parameterBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + parameter_ = java.util.Collections.unmodifiableList(parameter_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.parameter_ = parameter_; + } else { + result.parameter_ = parameterBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) return this; + if (parameterBuilder_ == null) { + if (!other.parameter_.isEmpty()) { + if (parameter_.isEmpty()) { + parameter_ = other.parameter_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureParameterIsMutable(); + parameter_.addAll(other.parameter_); + } + onChanged(); + } + } else { + if (!other.parameter_.isEmpty()) { + if (parameterBuilder_.isEmpty()) { + parameterBuilder_.dispose(); + parameterBuilder_ = null; + parameter_ = other.parameter_; + bitField0_ = (bitField0_ & ~0x00000001); + parameterBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getParameterFieldBuilder() : null; + } else { + parameterBuilder_.addAllMessages(other.parameter_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getParameterCount(); i++) { + if (!getParameter(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + private java.util.List parameter_ = + java.util.Collections.emptyList(); + private void ensureParameterIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + parameter_ = new java.util.ArrayList(parameter_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder> parameterBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public java.util.List getParameterList() { + if (parameterBuilder_ == null) { + return java.util.Collections.unmodifiableList(parameter_); + } else { + return parameterBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public int getParameterCount() { + if (parameterBuilder_ == null) { + return parameter_.size(); + } else { + return parameterBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry getParameter(int index) { + if (parameterBuilder_ == null) { + return parameter_.get(index); + } else { + return parameterBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder setParameter( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry value) { + if (parameterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParameterIsMutable(); + parameter_.set(index, value); + onChanged(); + } else { + parameterBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder setParameter( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder builderForValue) { + if (parameterBuilder_ == null) { + ensureParameterIsMutable(); + parameter_.set(index, builderForValue.build()); + onChanged(); + } else { + parameterBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated 
.org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder addParameter(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry value) { + if (parameterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParameterIsMutable(); + parameter_.add(value); + onChanged(); + } else { + parameterBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder addParameter( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry value) { + if (parameterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParameterIsMutable(); + parameter_.add(index, value); + onChanged(); + } else { + parameterBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder addParameter( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder builderForValue) { + if (parameterBuilder_ == null) { + ensureParameterIsMutable(); + parameter_.add(builderForValue.build()); + onChanged(); + } else { + parameterBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder addParameter( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder builderForValue) { + if (parameterBuilder_ == null) { + ensureParameterIsMutable(); + parameter_.add(index, builderForValue.build()); + onChanged(); + } else { + parameterBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder addAllParameter( + java.lang.Iterable values) { + if (parameterBuilder_ == null) { + ensureParameterIsMutable(); + super.addAll(values, parameter_); + onChanged(); + } else { + parameterBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder clearParameter() { + if (parameterBuilder_ == null) { + parameter_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + parameterBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder removeParameter(int index) { + if (parameterBuilder_ == null) { + ensureParameterIsMutable(); + parameter_.remove(index); + onChanged(); + } else { + parameterBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder getParameterBuilder( + int index) { + return getParameterFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder getParameterOrBuilder( + int index) { + if (parameterBuilder_ == null) { + return parameter_.get(index); } else { + return parameterBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + 
*/ + public java.util.List + getParameterOrBuilderList() { + if (parameterBuilder_ != null) { + return parameterBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(parameter_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder addParameterBuilder() { + return getParameterFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder addParameterBuilder( + int index) { + return getParameterFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public java.util.List + getParameterBuilderList() { + return getParameterFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder> + getParameterFieldBuilder() { + if (parameterBuilder_ == null) { + parameterBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder>( + parameter_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + parameter_ = null; + } + return parameterBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Parameters) + } + + static { + defaultInstance = new Parameters(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Parameters) + } + + public interface PartitionOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 create_time = 1; + /** + * optional int64 create_time = 1; + */ + boolean hasCreateTime(); + /** + * optional int64 create_time = 1; + */ + long getCreateTime(); + + // optional int64 last_access_time = 2; + /** + * optional int64 last_access_time = 2; + */ + boolean hasLastAccessTime(); + /** + * optional int64 last_access_time = 2; + */ + long getLastAccessTime(); + + // optional string location = 3; + /** + * optional string location = 3; + */ + boolean hasLocation(); + /** + * optional string location = 3; + */ + java.lang.String getLocation(); + /** + * optional string location = 3; + */ + com.google.protobuf.ByteString + getLocationBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+     * storage descriptor parameters
+     * 
+ */ + boolean hasSdParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+     * storage descriptor parameters
+     * 
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+     * storage descriptor parameters
+     * 
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder(); + + // required bytes sd_hash = 5; + /** + * required bytes sd_hash = 5; + */ + boolean hasSdHash(); + /** + * required bytes sd_hash = 5; + */ + com.google.protobuf.ByteString getSdHash(); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+     * partition parameters
+     * 
+ */ + boolean hasParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+     * partition parameters
+     * 
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+     * partition parameters
+     * 
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Partition} + */ + public static final class Partition extends + com.google.protobuf.GeneratedMessage + implements PartitionOrBuilder { + // Use Partition.newBuilder() to construct. + private Partition(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Partition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Partition defaultInstance; + public static Partition getDefaultInstance() { + return defaultInstance; + } + + public Partition getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Partition( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + createTime_ = input.readInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + lastAccessTime_ = input.readInt64(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + location_ = input.readBytes(); + break; + } + case 34: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = sdParameters_.toBuilder(); + } + sdParameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(sdParameters_); + sdParameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + case 42: { + bitField0_ |= 0x00000010; + sdHash_ = input.readBytes(); + break; + } + case 50: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + subBuilder = parameters_.toBuilder(); + } + parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(parameters_); + parameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000020; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Partition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Partition(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional int64 create_time = 1; + public static final int CREATE_TIME_FIELD_NUMBER = 1; + private long createTime_; + /** + * optional int64 create_time = 1; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 create_time = 1; + */ + public long getCreateTime() { + return createTime_; + } + + // optional int64 last_access_time = 2; + public static final int LAST_ACCESS_TIME_FIELD_NUMBER = 2; + private long lastAccessTime_; + /** + * optional int64 last_access_time = 2; + */ + public boolean hasLastAccessTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 last_access_time = 2; + */ + public long getLastAccessTime() { + return lastAccessTime_; + } + + // optional string location = 3; + public static final int LOCATION_FIELD_NUMBER = 3; + private java.lang.Object location_; + /** + * optional string location = 3; + */ + public boolean hasLocation() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string location = 3; + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + location_ = s; + } + return s; + } + } + /** + * optional string location = 3; + */ + public com.google.protobuf.ByteString + getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + public static final int SD_PARAMETERS_FIELD_NUMBER = 4; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+     * storage descriptor parameters
+     * 
+ */ + public boolean hasSdParameters() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+     * storage descriptor parameters
+     * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { + return sdParameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+     * storage descriptor parameters
+     * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { + return sdParameters_; + } + + // required bytes sd_hash = 5; + public static final int SD_HASH_FIELD_NUMBER = 5; + private com.google.protobuf.ByteString sdHash_; + /** + * required bytes sd_hash = 5; + */ + public boolean hasSdHash() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required bytes sd_hash = 5; + */ + public com.google.protobuf.ByteString getSdHash() { + return sdHash_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + public static final int PARAMETERS_FIELD_NUMBER = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+     * partition parameters
+     * 
+ */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+     * partition parameters
+     * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + return parameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+     * partition parameters
+     * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + return parameters_; + } + + private void initFields() { + createTime_ = 0L; + lastAccessTime_ = 0L; + location_ = ""; + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + sdHash_ = com.google.protobuf.ByteString.EMPTY; + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasSdHash()) { + memoizedIsInitialized = 0; + return false; + } + if (hasSdParameters()) { + if (!getSdParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasParameters()) { + if (!getParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, createTime_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(2, lastAccessTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getLocationBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, sdParameters_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, sdHash_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeMessage(6, parameters_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, createTime_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, lastAccessTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getLocationBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, sdParameters_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, sdHash_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, parameters_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Partition} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getSdParametersFieldBuilder(); + getParametersFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + createTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + lastAccessTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + location_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + if (sdParametersBuilder_ == null) { + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + sdParametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + sdHash_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.createTime_ = createTime_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.lastAccessTime_ = lastAccessTime_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.location_ = location_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (sdParametersBuilder_ == null) { + result.sdParameters_ = sdParameters_; + } else { + result.sdParameters_ = sdParametersBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.sdHash_ = sdHash_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + if (parametersBuilder_ == null) { + result.parameters_ = parameters_; + } else { + result.parameters_ = 
parametersBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.getDefaultInstance()) return this; + if (other.hasCreateTime()) { + setCreateTime(other.getCreateTime()); + } + if (other.hasLastAccessTime()) { + setLastAccessTime(other.getLastAccessTime()); + } + if (other.hasLocation()) { + bitField0_ |= 0x00000004; + location_ = other.location_; + onChanged(); + } + if (other.hasSdParameters()) { + mergeSdParameters(other.getSdParameters()); + } + if (other.hasSdHash()) { + setSdHash(other.getSdHash()); + } + if (other.hasParameters()) { + mergeParameters(other.getParameters()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasSdHash()) { + + return false; + } + if (hasSdParameters()) { + if (!getSdParameters().isInitialized()) { + + return false; + } + } + if (hasParameters()) { + if (!getParameters().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 create_time = 1; + private long createTime_ ; + /** + * optional int64 create_time = 1; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 create_time = 1; + */ + public long getCreateTime() { + return createTime_; + } + /** + * optional int64 create_time = 1; + */ + public Builder setCreateTime(long value) { + bitField0_ |= 0x00000001; + createTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 create_time = 1; + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000001); + createTime_ = 0L; + onChanged(); + return this; + } + + // optional int64 last_access_time = 2; + private long lastAccessTime_ ; + /** + * optional int64 last_access_time = 2; + */ + public boolean hasLastAccessTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 last_access_time = 2; + */ + public long getLastAccessTime() { + return lastAccessTime_; + } + /** + * optional int64 last_access_time = 2; + */ + public Builder setLastAccessTime(long value) { + bitField0_ |= 0x00000002; + lastAccessTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 last_access_time = 2; + */ + public Builder clearLastAccessTime() { + bitField0_ = (bitField0_ & ~0x00000002); + lastAccessTime_ = 0L; + onChanged(); + return 
this; + } + + // optional string location = 3; + private java.lang.Object location_ = ""; + /** + * optional string location = 3; + */ + public boolean hasLocation() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string location = 3; + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string location = 3; + */ + public com.google.protobuf.ByteString + getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string location = 3; + */ + public Builder setLocation( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + location_ = value; + onChanged(); + return this; + } + /** + * optional string location = 3; + */ + public Builder clearLocation() { + bitField0_ = (bitField0_ & ~0x00000004); + location_ = getDefaultInstance().getLocation(); + onChanged(); + return this; + } + /** + * optional string location = 3; + */ + public Builder setLocationBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + location_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> sdParametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public boolean hasSdParameters() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { + if (sdParametersBuilder_ == null) { + return sdParameters_; + } else { + return sdParametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder setSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (sdParametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sdParameters_ = value; + onChanged(); + } else { + sdParametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder setSdParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (sdParametersBuilder_ == null) { + sdParameters_ = builderForValue.build(); + onChanged(); + } else { + sdParametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder mergeSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (sdParametersBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + sdParameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + sdParameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(sdParameters_).mergeFrom(value).buildPartial(); + } else { + sdParameters_ = value; + } + onChanged(); + } else { + sdParametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder clearSdParameters() { + if (sdParametersBuilder_ == null) { + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + sdParametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getSdParametersBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getSdParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { + if (sdParametersBuilder_ != null) { + return sdParametersBuilder_.getMessageOrBuilder(); + } else { + return sdParameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getSdParametersFieldBuilder() { + if (sdParametersBuilder_ == null) { + sdParametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + sdParameters_, + getParentForChildren(), + isClean()); + sdParameters_ = null; + } + return sdParametersBuilder_; + } + + // required bytes sd_hash = 5; + private com.google.protobuf.ByteString sdHash_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes sd_hash = 5; + */ + public boolean hasSdHash() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required bytes sd_hash = 5; + */ + public com.google.protobuf.ByteString getSdHash() { + return sdHash_; + } + /** + * required bytes sd_hash = 5; + */ + public Builder setSdHash(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + sdHash_ = value; + onChanged(); + return this; + } + /** + * required bytes sd_hash = 5; + */ + public Builder clearSdHash() { + bitField0_ = (bitField0_ & ~0x00000010); + sdHash_ = getDefaultInstance().getSdHash(); + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + if (parametersBuilder_ == null) { + return parameters_; + } else { + return parametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + parameters_ = value; + onChanged(); + } else { + parametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public Builder setParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (parametersBuilder_ == null) { + parameters_ = builderForValue.build(); + onChanged(); + } else { + parametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020) && + parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + parameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); + } else { + parameters_ = value; + } + onChanged(); + } else { + parametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public Builder clearParameters() { + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return getParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + if (parametersBuilder_ != null) { + return parametersBuilder_.getMessageOrBuilder(); + } else { + return parameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getParametersFieldBuilder() { + if (parametersBuilder_ == null) { + parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + parameters_, + getParentForChildren(), + isClean()); + parameters_ = null; + } + return parametersBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Partition) + } + + static { + defaultInstance = new Partition(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Partition) + } + + public interface PrincipalPrivilegeSetEntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string principal_name = 1; + /** + * required string principal_name = 1; + */ + boolean hasPrincipalName(); + /** + * required string principal_name = 1; + */ + java.lang.String getPrincipalName(); + /** + * required string principal_name = 1; + */ + com.google.protobuf.ByteString + getPrincipalNameBytes(); + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + java.util.List + getPrivilegesList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo getPrivileges(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + int getPrivilegesCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + java.util.List + getPrivilegesOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder getPrivilegesOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry} + */ + public static final class PrincipalPrivilegeSetEntry extends + com.google.protobuf.GeneratedMessage + implements PrincipalPrivilegeSetEntryOrBuilder { + // Use PrincipalPrivilegeSetEntry.newBuilder() to construct. 
+ private PrincipalPrivilegeSetEntry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private PrincipalPrivilegeSetEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PrincipalPrivilegeSetEntry defaultInstance; + public static PrincipalPrivilegeSetEntry getDefaultInstance() { + return defaultInstance; + } + + public PrincipalPrivilegeSetEntry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PrincipalPrivilegeSetEntry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + principalName_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + privileges_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + privileges_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + privileges_ = java.util.Collections.unmodifiableList(privileges_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PrincipalPrivilegeSetEntry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PrincipalPrivilegeSetEntry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private 
int bitField0_; + // required string principal_name = 1; + public static final int PRINCIPAL_NAME_FIELD_NUMBER = 1; + private java.lang.Object principalName_; + /** + * required string principal_name = 1; + */ + public boolean hasPrincipalName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string principal_name = 1; + */ + public java.lang.String getPrincipalName() { + java.lang.Object ref = principalName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + principalName_ = s; + } + return s; + } + } + /** + * required string principal_name = 1; + */ + public com.google.protobuf.ByteString + getPrincipalNameBytes() { + java.lang.Object ref = principalName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + principalName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + public static final int PRIVILEGES_FIELD_NUMBER = 2; + private java.util.List privileges_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public java.util.List getPrivilegesList() { + return privileges_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public java.util.List + getPrivilegesOrBuilderList() { + return privileges_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public int getPrivilegesCount() { + return privileges_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo getPrivileges(int index) { + return privileges_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder getPrivilegesOrBuilder( + int index) { + return privileges_.get(index); + } + + private void initFields() { + principalName_ = ""; + privileges_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasPrincipalName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getPrincipalNameBytes()); + } + for (int i = 0; i < privileges_.size(); i++) { + output.writeMessage(2, privileges_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getPrincipalNameBytes()); + } + for (int i = 0; i < privileges_.size(); i++) { + size += 
com.google.protobuf.CodedOutputStream + .computeMessageSize(2, privileges_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getPrivilegesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + principalName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (privilegesBuilder_ == null) { + privileges_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + privilegesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.principalName_ = principalName_; + if (privilegesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + privileges_ = 
java.util.Collections.unmodifiableList(privileges_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.privileges_ = privileges_; + } else { + result.privileges_ = privilegesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()) return this; + if (other.hasPrincipalName()) { + bitField0_ |= 0x00000001; + principalName_ = other.principalName_; + onChanged(); + } + if (privilegesBuilder_ == null) { + if (!other.privileges_.isEmpty()) { + if (privileges_.isEmpty()) { + privileges_ = other.privileges_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensurePrivilegesIsMutable(); + privileges_.addAll(other.privileges_); + } + onChanged(); + } + } else { + if (!other.privileges_.isEmpty()) { + if (privilegesBuilder_.isEmpty()) { + privilegesBuilder_.dispose(); + privilegesBuilder_ = null; + privileges_ = other.privileges_; + bitField0_ = (bitField0_ & ~0x00000002); + privilegesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getPrivilegesFieldBuilder() : null; + } else { + privilegesBuilder_.addAllMessages(other.privileges_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasPrincipalName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string principal_name = 1; + private java.lang.Object principalName_ = ""; + /** + * required string principal_name = 1; + */ + public boolean hasPrincipalName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string principal_name = 1; + */ + public java.lang.String getPrincipalName() { + java.lang.Object ref = principalName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + principalName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string principal_name = 1; + */ + public com.google.protobuf.ByteString + getPrincipalNameBytes() { + java.lang.Object ref = principalName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + principalName_ = b; + return b; + } else { + return 
(com.google.protobuf.ByteString) ref; + } + } + /** + * required string principal_name = 1; + */ + public Builder setPrincipalName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + principalName_ = value; + onChanged(); + return this; + } + /** + * required string principal_name = 1; + */ + public Builder clearPrincipalName() { + bitField0_ = (bitField0_ & ~0x00000001); + principalName_ = getDefaultInstance().getPrincipalName(); + onChanged(); + return this; + } + /** + * required string principal_name = 1; + */ + public Builder setPrincipalNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + principalName_ = value; + onChanged(); + return this; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + private java.util.List privileges_ = + java.util.Collections.emptyList(); + private void ensurePrivilegesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + privileges_ = new java.util.ArrayList(privileges_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder> privilegesBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public java.util.List getPrivilegesList() { + if (privilegesBuilder_ == null) { + return java.util.Collections.unmodifiableList(privileges_); + } else { + return privilegesBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public int getPrivilegesCount() { + if (privilegesBuilder_ == null) { + return privileges_.size(); + } else { + return privilegesBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo getPrivileges(int index) { + if (privilegesBuilder_ == null) { + return privileges_.get(index); + } else { + return privilegesBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder setPrivileges( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo value) { + if (privilegesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePrivilegesIsMutable(); + privileges_.set(index, value); + onChanged(); + } else { + privilegesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder setPrivileges( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder builderForValue) { + if (privilegesBuilder_ == null) { + ensurePrivilegesIsMutable(); + privileges_.set(index, builderForValue.build()); + onChanged(); + } else { + privilegesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder 
addPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo value) { + if (privilegesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePrivilegesIsMutable(); + privileges_.add(value); + onChanged(); + } else { + privilegesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder addPrivileges( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo value) { + if (privilegesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePrivilegesIsMutable(); + privileges_.add(index, value); + onChanged(); + } else { + privilegesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder addPrivileges( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder builderForValue) { + if (privilegesBuilder_ == null) { + ensurePrivilegesIsMutable(); + privileges_.add(builderForValue.build()); + onChanged(); + } else { + privilegesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder addPrivileges( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder builderForValue) { + if (privilegesBuilder_ == null) { + ensurePrivilegesIsMutable(); + privileges_.add(index, builderForValue.build()); + onChanged(); + } else { + privilegesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder addAllPrivileges( + java.lang.Iterable values) { + if (privilegesBuilder_ == null) { + ensurePrivilegesIsMutable(); + super.addAll(values, privileges_); + onChanged(); + } else { + privilegesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder clearPrivileges() { + if (privilegesBuilder_ == null) { + privileges_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + privilegesBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder removePrivileges(int index) { + if (privilegesBuilder_ == null) { + ensurePrivilegesIsMutable(); + privileges_.remove(index); + onChanged(); + } else { + privilegesBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder getPrivilegesBuilder( + int index) { + return getPrivilegesFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder getPrivilegesOrBuilder( + int index) { + if (privilegesBuilder_ == null) { + return privileges_.get(index); } else { + return privilegesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo 
privileges = 2; + */ + public java.util.List + getPrivilegesOrBuilderList() { + if (privilegesBuilder_ != null) { + return privilegesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(privileges_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder addPrivilegesBuilder() { + return getPrivilegesFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder addPrivilegesBuilder( + int index) { + return getPrivilegesFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public java.util.List + getPrivilegesBuilderList() { + return getPrivilegesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder> + getPrivilegesFieldBuilder() { + if (privilegesBuilder_ == null) { + privilegesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder>( + privileges_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + privileges_ = null; + } + return privilegesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry) + } + + static { + defaultInstance = new PrincipalPrivilegeSetEntry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry) + } + + public interface PrincipalPrivilegeSetOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + java.util.List + getUsersList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getUsers(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + int getUsersCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + java.util.List + getUsersOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getUsersOrBuilder( + int index); + + // repeated 
.org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + java.util.List + getRolesList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getRoles(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + int getRolesCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + java.util.List + getRolesOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getRolesOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet} + */ + public static final class PrincipalPrivilegeSet extends + com.google.protobuf.GeneratedMessage + implements PrincipalPrivilegeSetOrBuilder { + // Use PrincipalPrivilegeSet.newBuilder() to construct. + private PrincipalPrivilegeSet(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private PrincipalPrivilegeSet(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PrincipalPrivilegeSet defaultInstance; + public static PrincipalPrivilegeSet getDefaultInstance() { + return defaultInstance; + } + + public PrincipalPrivilegeSet getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PrincipalPrivilegeSet( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + users_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + users_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.PARSER, extensionRegistry)); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + roles_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + roles_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + users_ = 
java.util.Collections.unmodifiableList(users_); + } + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + roles_ = java.util.Collections.unmodifiableList(roles_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PrincipalPrivilegeSet parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PrincipalPrivilegeSet(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + public static final int USERS_FIELD_NUMBER = 1; + private java.util.List users_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public java.util.List getUsersList() { + return users_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public java.util.List + getUsersOrBuilderList() { + return users_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public int getUsersCount() { + return users_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getUsers(int index) { + return users_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getUsersOrBuilder( + int index) { + return users_.get(index); + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + public static final int ROLES_FIELD_NUMBER = 2; + private java.util.List roles_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public java.util.List getRolesList() { + return roles_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public java.util.List + getRolesOrBuilderList() { + return roles_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public int getRolesCount() { + return roles_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getRoles(int index) { + return roles_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getRolesOrBuilder( + int index) { + return roles_.get(index); + } + + private void initFields() { + users_ = java.util.Collections.emptyList(); + roles_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getUsersCount(); i++) { + if (!getUsers(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getRolesCount(); i++) { + if (!getRoles(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < users_.size(); i++) { + output.writeMessage(1, users_.get(i)); + } + for (int i = 0; i < roles_.size(); i++) { + output.writeMessage(2, roles_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < users_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, users_.get(i)); + } + for (int i = 0; i < roles_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, roles_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( + 
java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUsersFieldBuilder(); + getRolesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (usersBuilder_ == null) { + users_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & 
~0x00000001); + } else { + usersBuilder_.clear(); + } + if (rolesBuilder_ == null) { + roles_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + rolesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet(this); + int from_bitField0_ = bitField0_; + if (usersBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + users_ = java.util.Collections.unmodifiableList(users_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.users_ = users_; + } else { + result.users_ = usersBuilder_.build(); + } + if (rolesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + roles_ = java.util.Collections.unmodifiableList(roles_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.roles_ = roles_; + } else { + result.roles_ = rolesBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) return this; + if (usersBuilder_ == null) { + if (!other.users_.isEmpty()) { + if (users_.isEmpty()) { + users_ = other.users_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureUsersIsMutable(); + users_.addAll(other.users_); + } + onChanged(); + } + } else { + if (!other.users_.isEmpty()) { + if (usersBuilder_.isEmpty()) { + usersBuilder_.dispose(); + usersBuilder_ = null; + users_ = other.users_; + bitField0_ = (bitField0_ & ~0x00000001); + usersBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getUsersFieldBuilder() : null; + } else { + usersBuilder_.addAllMessages(other.users_); + } + } + } + if (rolesBuilder_ == null) { + if (!other.roles_.isEmpty()) { + if (roles_.isEmpty()) { + roles_ = other.roles_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRolesIsMutable(); + roles_.addAll(other.roles_); + } + onChanged(); + } + } else { + if (!other.roles_.isEmpty()) { + if (rolesBuilder_.isEmpty()) { + rolesBuilder_.dispose(); + rolesBuilder_ = null; + roles_ = other.roles_; + bitField0_ = (bitField0_ & ~0x00000002); + rolesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getRolesFieldBuilder() : null; + } else { + rolesBuilder_.addAllMessages(other.roles_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getUsersCount(); i++) { + if (!getUsers(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getRolesCount(); i++) { + if (!getRoles(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + private java.util.List users_ = + java.util.Collections.emptyList(); + private void ensureUsersIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + users_ = new java.util.ArrayList(users_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> usersBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public java.util.List getUsersList() { + if (usersBuilder_ == null) { + return java.util.Collections.unmodifiableList(users_); + } else { + return usersBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public int getUsersCount() { + if (usersBuilder_ == null) { + return users_.size(); + } else { + return usersBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getUsers(int index) { + if (usersBuilder_ == null) { + return users_.get(index); + } else { + return usersBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder setUsers( + int index, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (usersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUsersIsMutable(); + users_.set(index, value); + onChanged(); + } else { + usersBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder setUsers( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (usersBuilder_ == null) { + ensureUsersIsMutable(); + users_.set(index, builderForValue.build()); + onChanged(); + } else { + usersBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder addUsers(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (usersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUsersIsMutable(); + users_.add(value); + onChanged(); + } else { + usersBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder addUsers( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (usersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUsersIsMutable(); + users_.add(index, value); + onChanged(); + } else { + usersBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder addUsers( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (usersBuilder_ == null) { + ensureUsersIsMutable(); + users_.add(builderForValue.build()); + onChanged(); + } else { + usersBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder addUsers( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (usersBuilder_ == null) { + ensureUsersIsMutable(); + users_.add(index, builderForValue.build()); + onChanged(); + } else { + usersBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder addAllUsers( + java.lang.Iterable values) { + if (usersBuilder_ == null) { + ensureUsersIsMutable(); + super.addAll(values, users_); + onChanged(); + } else { + usersBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder clearUsers() { + if (usersBuilder_ == null) { + users_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + usersBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder removeUsers(int index) { + if (usersBuilder_ == null) { + ensureUsersIsMutable(); + users_.remove(index); + onChanged(); + } else { 
+ usersBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder getUsersBuilder( + int index) { + return getUsersFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getUsersOrBuilder( + int index) { + if (usersBuilder_ == null) { + return users_.get(index); } else { + return usersBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public java.util.List + getUsersOrBuilderList() { + if (usersBuilder_ != null) { + return usersBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(users_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addUsersBuilder() { + return getUsersFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addUsersBuilder( + int index) { + return getUsersFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public java.util.List + getUsersBuilderList() { + return getUsersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> + getUsersFieldBuilder() { + if (usersBuilder_ == null) { + usersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder>( + users_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + users_ = null; + } + return usersBuilder_; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + private java.util.List roles_ = + java.util.Collections.emptyList(); + private void ensureRolesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + roles_ = new java.util.ArrayList(roles_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> rolesBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public java.util.List getRolesList() { + if (rolesBuilder_ == null) { + return java.util.Collections.unmodifiableList(roles_); + } else { + return rolesBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public int getRolesCount() { + if (rolesBuilder_ == null) { + return roles_.size(); + } else { + return rolesBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getRoles(int index) { + if (rolesBuilder_ == null) { + return roles_.get(index); + } else { + return rolesBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder setRoles( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (rolesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRolesIsMutable(); + roles_.set(index, value); + onChanged(); + } else { + rolesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder setRoles( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (rolesBuilder_ == null) { + ensureRolesIsMutable(); + roles_.set(index, builderForValue.build()); + onChanged(); + } else { + rolesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder addRoles(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (rolesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRolesIsMutable(); + roles_.add(value); + onChanged(); + } else { + rolesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder addRoles( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (rolesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRolesIsMutable(); + roles_.add(index, value); + onChanged(); + } else { + rolesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder addRoles( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (rolesBuilder_ == null) { + ensureRolesIsMutable(); + roles_.add(builderForValue.build()); + onChanged(); + } else { + rolesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder addRoles( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if 
(rolesBuilder_ == null) { + ensureRolesIsMutable(); + roles_.add(index, builderForValue.build()); + onChanged(); + } else { + rolesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder addAllRoles( + java.lang.Iterable values) { + if (rolesBuilder_ == null) { + ensureRolesIsMutable(); + super.addAll(values, roles_); + onChanged(); + } else { + rolesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder clearRoles() { + if (rolesBuilder_ == null) { + roles_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + rolesBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder removeRoles(int index) { + if (rolesBuilder_ == null) { + ensureRolesIsMutable(); + roles_.remove(index); + onChanged(); + } else { + rolesBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder getRolesBuilder( + int index) { + return getRolesFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getRolesOrBuilder( + int index) { + if (rolesBuilder_ == null) { + return roles_.get(index); } else { + return rolesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public java.util.List + getRolesOrBuilderList() { + if (rolesBuilder_ != null) { + return rolesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(roles_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addRolesBuilder() { + return getRolesFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addRolesBuilder( + int index) { + return getRolesFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public java.util.List + getRolesBuilderList() { + return getRolesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> + getRolesFieldBuilder() { + if (rolesBuilder_ == null) { + rolesBuilder_ = 
new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder>( + roles_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + roles_ = null; + } + return rolesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet) + } + + static { + defaultInstance = new PrincipalPrivilegeSet(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet) + } + + public interface PrivilegeGrantInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string privilege = 1; + /** + * optional string privilege = 1; + */ + boolean hasPrivilege(); + /** + * optional string privilege = 1; + */ + java.lang.String getPrivilege(); + /** + * optional string privilege = 1; + */ + com.google.protobuf.ByteString + getPrivilegeBytes(); + + // optional int64 create_time = 2; + /** + * optional int64 create_time = 2; + */ + boolean hasCreateTime(); + /** + * optional int64 create_time = 2; + */ + long getCreateTime(); + + // optional string grantor = 3; + /** + * optional string grantor = 3; + */ + boolean hasGrantor(); + /** + * optional string grantor = 3; + */ + java.lang.String getGrantor(); + /** + * optional string grantor = 3; + */ + com.google.protobuf.ByteString + getGrantorBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + boolean hasGrantorType(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType(); + + // optional bool grant_option = 5; + /** + * optional bool grant_option = 5; + */ + boolean hasGrantOption(); + /** + * optional bool grant_option = 5; + */ + boolean getGrantOption(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo} + */ + public static final class PrivilegeGrantInfo extends + com.google.protobuf.GeneratedMessage + implements PrivilegeGrantInfoOrBuilder { + // Use PrivilegeGrantInfo.newBuilder() to construct. 
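As a rough, illustrative sketch of how the PrincipalPrivilegeSetEntry and PrincipalPrivilegeSet messages generated above are meant to be exercised (this sketch is not part of the generated file or of this patch; the principal names are made up), a caller in the metastore's HBase layer could build a privilege set and round-trip it through the generated serialization:

  // Illustrative only: build a PrincipalPrivilegeSet with one user entry and one role entry,
  // serialize it to the byte[] form a caller would store in an HBase cell, and parse it back.
  import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

  public class PrincipalPrivilegeSetExample {
    public static void main(String[] args) throws Exception {
      HbaseMetastoreProto.PrincipalPrivilegeSet pps =
          HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder()
              // principal_name is a required field, so build() throws if it is left unset
              .addUsers(HbaseMetastoreProto.PrincipalPrivilegeSetEntry.newBuilder()
                  .setPrincipalName("some_user")
                  .addPrivileges(HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance()))
              .addRoles(HbaseMetastoreProto.PrincipalPrivilegeSetEntry.newBuilder()
                  .setPrincipalName("some_role"))
              .build();

      byte[] serialized = pps.toByteArray();
      HbaseMetastoreProto.PrincipalPrivilegeSet parsed =
          HbaseMetastoreProto.PrincipalPrivilegeSet.parseFrom(serialized);
      System.out.println(parsed.getUsersCount() + " user entries, "
          + parsed.getRolesCount() + " role entries");
    }
  }

The generated implementation of the PrivilegeGrantInfo message referenced by those entries continues in the diff below.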
+ private PrivilegeGrantInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private PrivilegeGrantInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PrivilegeGrantInfo defaultInstance; + public static PrivilegeGrantInfo getDefaultInstance() { + return defaultInstance; + } + + public PrivilegeGrantInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PrivilegeGrantInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + privilege_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + createTime_ = input.readInt64(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + grantor_ = input.readBytes(); + break; + } + case 32: { + int rawValue = input.readEnum(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(4, rawValue); + } else { + bitField0_ |= 0x00000008; + grantorType_ = value; + } + break; + } + case 40: { + bitField0_ |= 0x00000010; + grantOption_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PrivilegeGrantInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PrivilegeGrantInfo(input, extensionRegistry); + } + }; + + @java.lang.Override 
+ public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string privilege = 1; + public static final int PRIVILEGE_FIELD_NUMBER = 1; + private java.lang.Object privilege_; + /** + * optional string privilege = 1; + */ + public boolean hasPrivilege() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string privilege = 1; + */ + public java.lang.String getPrivilege() { + java.lang.Object ref = privilege_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + privilege_ = s; + } + return s; + } + } + /** + * optional string privilege = 1; + */ + public com.google.protobuf.ByteString + getPrivilegeBytes() { + java.lang.Object ref = privilege_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + privilege_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 create_time = 2; + public static final int CREATE_TIME_FIELD_NUMBER = 2; + private long createTime_; + /** + * optional int64 create_time = 2; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 create_time = 2; + */ + public long getCreateTime() { + return createTime_; + } + + // optional string grantor = 3; + public static final int GRANTOR_FIELD_NUMBER = 3; + private java.lang.Object grantor_; + /** + * optional string grantor = 3; + */ + public boolean hasGrantor() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string grantor = 3; + */ + public java.lang.String getGrantor() { + java.lang.Object ref = grantor_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + grantor_ = s; + } + return s; + } + } + /** + * optional string grantor = 3; + */ + public com.google.protobuf.ByteString + getGrantorBytes() { + java.lang.Object ref = grantor_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + grantor_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + public static final int GRANTOR_TYPE_FIELD_NUMBER = 4; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType grantorType_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + public boolean hasGrantorType() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType() { + return grantorType_; + } + + // optional bool grant_option = 5; + public static final int GRANT_OPTION_FIELD_NUMBER = 5; + private boolean grantOption_; + /** + * optional bool grant_option = 5; + */ + public boolean hasGrantOption() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool grant_option = 5; + */ + public boolean 
getGrantOption() { + return grantOption_; + } + + private void initFields() { + privilege_ = ""; + createTime_ = 0L; + grantor_ = ""; + grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + grantOption_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getPrivilegeBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(2, createTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getGrantorBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeEnum(4, grantorType_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBool(5, grantOption_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getPrivilegeBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, createTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getGrantorBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(4, grantorType_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(5, grantOption_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom(java.io.InputStream input) + 
throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + privilege_ = ""; + bitField0_ 
= (bitField0_ & ~0x00000001); + createTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + grantor_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + bitField0_ = (bitField0_ & ~0x00000008); + grantOption_ = false; + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.privilege_ = privilege_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.createTime_ = createTime_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.grantor_ = grantor_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.grantorType_ = grantorType_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.grantOption_ = grantOption_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance()) return this; + if (other.hasPrivilege()) { + bitField0_ |= 0x00000001; + privilege_ = other.privilege_; + onChanged(); + } + if (other.hasCreateTime()) { + setCreateTime(other.getCreateTime()); + } + if (other.hasGrantor()) { + bitField0_ |= 0x00000004; + grantor_ = other.grantor_; + onChanged(); + } + if (other.hasGrantorType()) { + setGrantorType(other.getGrantorType()); + } + if (other.hasGrantOption()) { + setGrantOption(other.getGrantOption()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string privilege = 1; + private java.lang.Object privilege_ = ""; + /** + * optional string privilege = 1; + */ + public boolean hasPrivilege() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string privilege = 1; + */ + public java.lang.String getPrivilege() { + java.lang.Object ref = privilege_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + privilege_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string privilege = 1; + */ + public com.google.protobuf.ByteString + getPrivilegeBytes() { + java.lang.Object ref = privilege_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + privilege_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string privilege = 1; + */ + public Builder setPrivilege( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + privilege_ = value; + onChanged(); + return this; + } + /** + * optional string privilege = 1; + */ + public Builder clearPrivilege() { + bitField0_ = (bitField0_ & ~0x00000001); + privilege_ = getDefaultInstance().getPrivilege(); + onChanged(); + return this; + } + /** + * optional string privilege = 1; + */ + public Builder setPrivilegeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + privilege_ = value; + onChanged(); + return this; + } + + // optional int64 create_time = 2; + private long createTime_ ; + /** + * optional int64 create_time = 2; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 create_time = 2; + */ + public long getCreateTime() { + return createTime_; + } + /** + * optional int64 create_time = 2; + */ + public Builder setCreateTime(long value) { + bitField0_ |= 0x00000002; + createTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 create_time = 2; + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000002); + createTime_ = 0L; + onChanged(); + return this; + } + + // optional string grantor = 3; + private java.lang.Object grantor_ = ""; + /** + * optional string grantor = 3; + */ + public boolean hasGrantor() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string grantor = 3; + */ + public java.lang.String getGrantor() { + java.lang.Object ref = grantor_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + grantor_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string grantor = 3; + */ + public 
com.google.protobuf.ByteString + getGrantorBytes() { + java.lang.Object ref = grantor_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + grantor_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string grantor = 3; + */ + public Builder setGrantor( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + grantor_ = value; + onChanged(); + return this; + } + /** + * optional string grantor = 3; + */ + public Builder clearGrantor() { + bitField0_ = (bitField0_ & ~0x00000004); + grantor_ = getDefaultInstance().getGrantor(); + onChanged(); + return this; + } + /** + * optional string grantor = 3; + */ + public Builder setGrantorBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + grantor_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + public boolean hasGrantorType() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType() { + return grantorType_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + public Builder setGrantorType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + grantorType_ = value; + onChanged(); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + public Builder clearGrantorType() { + bitField0_ = (bitField0_ & ~0x00000008); + grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + onChanged(); + return this; + } + + // optional bool grant_option = 5; + private boolean grantOption_ ; + /** + * optional bool grant_option = 5; + */ + public boolean hasGrantOption() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool grant_option = 5; + */ + public boolean getGrantOption() { + return grantOption_; + } + /** + * optional bool grant_option = 5; + */ + public Builder setGrantOption(boolean value) { + bitField0_ |= 0x00000010; + grantOption_ = value; + onChanged(); + return this; + } + /** + * optional bool grant_option = 5; + */ + public Builder clearGrantOption() { + bitField0_ = (bitField0_ & ~0x00000010); + grantOption_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo) + } + + static { + defaultInstance = new PrivilegeGrantInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo) + } + + public interface RoleGrantInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string principal_name = 1; + /** + * required string 
principal_name = 1; + */ + boolean hasPrincipalName(); + /** + * required string principal_name = 1; + */ + java.lang.String getPrincipalName(); + /** + * required string principal_name = 1; + */ + com.google.protobuf.ByteString + getPrincipalNameBytes(); + + // required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + boolean hasPrincipalType(); + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getPrincipalType(); + + // optional int64 add_time = 3; + /** + * optional int64 add_time = 3; + */ + boolean hasAddTime(); + /** + * optional int64 add_time = 3; + */ + long getAddTime(); + + // optional string grantor = 4; + /** + * optional string grantor = 4; + */ + boolean hasGrantor(); + /** + * optional string grantor = 4; + */ + java.lang.String getGrantor(); + /** + * optional string grantor = 4; + */ + com.google.protobuf.ByteString + getGrantorBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + boolean hasGrantorType(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType(); + + // optional bool grant_option = 6; + /** + * optional bool grant_option = 6; + */ + boolean hasGrantOption(); + /** + * optional bool grant_option = 6; + */ + boolean getGrantOption(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo} + */ + public static final class RoleGrantInfo extends + com.google.protobuf.GeneratedMessage + implements RoleGrantInfoOrBuilder { + // Use RoleGrantInfo.newBuilder() to construct. 
+ private RoleGrantInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RoleGrantInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RoleGrantInfo defaultInstance; + public static RoleGrantInfo getDefaultInstance() { + return defaultInstance; + } + + public RoleGrantInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RoleGrantInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + principalName_ = input.readBytes(); + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + principalType_ = value; + } + break; + } + case 24: { + bitField0_ |= 0x00000004; + addTime_ = input.readInt64(); + break; + } + case 34: { + bitField0_ |= 0x00000008; + grantor_ = input.readBytes(); + break; + } + case 40: { + int rawValue = input.readEnum(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(5, rawValue); + } else { + bitField0_ |= 0x00000010; + grantorType_ = value; + } + break; + } + case 48: { + bitField0_ |= 0x00000020; + grantOption_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new 
com.google.protobuf.AbstractParser() { + public RoleGrantInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RoleGrantInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string principal_name = 1; + public static final int PRINCIPAL_NAME_FIELD_NUMBER = 1; + private java.lang.Object principalName_; + /** + * required string principal_name = 1; + */ + public boolean hasPrincipalName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string principal_name = 1; + */ + public java.lang.String getPrincipalName() { + java.lang.Object ref = principalName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + principalName_ = s; + } + return s; + } + } + /** + * required string principal_name = 1; + */ + public com.google.protobuf.ByteString + getPrincipalNameBytes() { + java.lang.Object ref = principalName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + principalName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + public static final int PRINCIPAL_TYPE_FIELD_NUMBER = 2; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType principalType_; + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + public boolean hasPrincipalType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getPrincipalType() { + return principalType_; + } + + // optional int64 add_time = 3; + public static final int ADD_TIME_FIELD_NUMBER = 3; + private long addTime_; + /** + * optional int64 add_time = 3; + */ + public boolean hasAddTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 add_time = 3; + */ + public long getAddTime() { + return addTime_; + } + + // optional string grantor = 4; + public static final int GRANTOR_FIELD_NUMBER = 4; + private java.lang.Object grantor_; + /** + * optional string grantor = 4; + */ + public boolean hasGrantor() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional string grantor = 4; + */ + public java.lang.String getGrantor() { + java.lang.Object ref = grantor_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + grantor_ = s; + } + return s; + } + } + /** + * optional string grantor = 4; + */ + public com.google.protobuf.ByteString + getGrantorBytes() { + java.lang.Object ref = grantor_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + grantor_ = b; + return b; + } else { + 
return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + public static final int GRANTOR_TYPE_FIELD_NUMBER = 5; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType grantorType_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + public boolean hasGrantorType() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType() { + return grantorType_; + } + + // optional bool grant_option = 6; + public static final int GRANT_OPTION_FIELD_NUMBER = 6; + private boolean grantOption_; + /** + * optional bool grant_option = 6; + */ + public boolean hasGrantOption() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool grant_option = 6; + */ + public boolean getGrantOption() { + return grantOption_; + } + + private void initFields() { + principalName_ = ""; + principalType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + addTime_ = 0L; + grantor_ = ""; + grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + grantOption_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasPrincipalName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPrincipalType()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getPrincipalNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, principalType_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, addTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, getGrantorBytes()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeEnum(5, grantorType_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBool(6, grantOption_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getPrincipalNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, principalType_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, addTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getGrantorBytes()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(5, grantorType_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + 
.computeBoolSize(6, grantOption_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo} + */ + public static 
final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + principalName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + principalType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + bitField0_ = (bitField0_ & ~0x00000002); + addTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + grantor_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); + grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + bitField0_ = (bitField0_ & ~0x00000010); + grantOption_ = false; + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.principalName_ = principalName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.principalType_ = principalType_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.addTime_ = addTime_; + if 
(((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.grantor_ = grantor_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.grantorType_ = grantorType_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.grantOption_ = grantOption_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.getDefaultInstance()) return this; + if (other.hasPrincipalName()) { + bitField0_ |= 0x00000001; + principalName_ = other.principalName_; + onChanged(); + } + if (other.hasPrincipalType()) { + setPrincipalType(other.getPrincipalType()); + } + if (other.hasAddTime()) { + setAddTime(other.getAddTime()); + } + if (other.hasGrantor()) { + bitField0_ |= 0x00000008; + grantor_ = other.grantor_; + onChanged(); + } + if (other.hasGrantorType()) { + setGrantorType(other.getGrantorType()); + } + if (other.hasGrantOption()) { + setGrantOption(other.getGrantOption()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasPrincipalName()) { + + return false; + } + if (!hasPrincipalType()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string principal_name = 1; + private java.lang.Object principalName_ = ""; + /** + * required string principal_name = 1; + */ + public boolean hasPrincipalName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string principal_name = 1; + */ + public java.lang.String getPrincipalName() { + java.lang.Object ref = principalName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + principalName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string principal_name = 1; + */ + public com.google.protobuf.ByteString + getPrincipalNameBytes() { + java.lang.Object ref = principalName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + principalName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string principal_name = 1; + */ + public Builder setPrincipalName( + java.lang.String value) { + if (value 
== null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + principalName_ = value; + onChanged(); + return this; + } + /** + * required string principal_name = 1; + */ + public Builder clearPrincipalName() { + bitField0_ = (bitField0_ & ~0x00000001); + principalName_ = getDefaultInstance().getPrincipalName(); + onChanged(); + return this; + } + /** + * required string principal_name = 1; + */ + public Builder setPrincipalNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + principalName_ = value; + onChanged(); + return this; + } + + // required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType principalType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + public boolean hasPrincipalType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getPrincipalType() { + return principalType_; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + public Builder setPrincipalType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + principalType_ = value; + onChanged(); + return this; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + public Builder clearPrincipalType() { + bitField0_ = (bitField0_ & ~0x00000002); + principalType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + onChanged(); + return this; + } + + // optional int64 add_time = 3; + private long addTime_ ; + /** + * optional int64 add_time = 3; + */ + public boolean hasAddTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 add_time = 3; + */ + public long getAddTime() { + return addTime_; + } + /** + * optional int64 add_time = 3; + */ + public Builder setAddTime(long value) { + bitField0_ |= 0x00000004; + addTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 add_time = 3; + */ + public Builder clearAddTime() { + bitField0_ = (bitField0_ & ~0x00000004); + addTime_ = 0L; + onChanged(); + return this; + } + + // optional string grantor = 4; + private java.lang.Object grantor_ = ""; + /** + * optional string grantor = 4; + */ + public boolean hasGrantor() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional string grantor = 4; + */ + public java.lang.String getGrantor() { + java.lang.Object ref = grantor_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + grantor_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string grantor = 4; + */ + public com.google.protobuf.ByteString + getGrantorBytes() { + java.lang.Object ref = grantor_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + grantor_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; 
+ } + } + /** + * optional string grantor = 4; + */ + public Builder setGrantor( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + grantor_ = value; + onChanged(); + return this; + } + /** + * optional string grantor = 4; + */ + public Builder clearGrantor() { + bitField0_ = (bitField0_ & ~0x00000008); + grantor_ = getDefaultInstance().getGrantor(); + onChanged(); + return this; + } + /** + * optional string grantor = 4; + */ + public Builder setGrantorBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + grantor_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + public boolean hasGrantorType() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType() { + return grantorType_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + public Builder setGrantorType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + grantorType_ = value; + onChanged(); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + public Builder clearGrantorType() { + bitField0_ = (bitField0_ & ~0x00000010); + grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + onChanged(); + return this; + } + + // optional bool grant_option = 6; + private boolean grantOption_ ; + /** + * optional bool grant_option = 6; + */ + public boolean hasGrantOption() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool grant_option = 6; + */ + public boolean getGrantOption() { + return grantOption_; + } + /** + * optional bool grant_option = 6; + */ + public Builder setGrantOption(boolean value) { + bitField0_ |= 0x00000020; + grantOption_ = value; + onChanged(); + return this; + } + /** + * optional bool grant_option = 6; + */ + public Builder clearGrantOption() { + bitField0_ = (bitField0_ & ~0x00000020); + grantOption_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo) + } + + static { + defaultInstance = new RoleGrantInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo) + } + + public interface RoleGrantInfoListOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + java.util.List + getGrantInfoList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo 
getGrantInfo(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + int getGrantInfoCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + java.util.List + getGrantInfoOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder getGrantInfoOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleGrantInfoList} + */ + public static final class RoleGrantInfoList extends + com.google.protobuf.GeneratedMessage + implements RoleGrantInfoListOrBuilder { + // Use RoleGrantInfoList.newBuilder() to construct. + private RoleGrantInfoList(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RoleGrantInfoList(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RoleGrantInfoList defaultInstance; + public static RoleGrantInfoList getDefaultInstance() { + return defaultInstance; + } + + public RoleGrantInfoList getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RoleGrantInfoList( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + grantInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + grantInfo_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + grantInfo_ = java.util.Collections.unmodifiableList(grantInfo_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RoleGrantInfoList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RoleGrantInfoList(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + public static final int GRANT_INFO_FIELD_NUMBER = 1; + private java.util.List grantInfo_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public java.util.List getGrantInfoList() { + return grantInfo_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public java.util.List + getGrantInfoOrBuilderList() { + return grantInfo_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public int getGrantInfoCount() { + return grantInfo_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo getGrantInfo(int index) { + return grantInfo_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder getGrantInfoOrBuilder( + int index) { + return grantInfo_.get(index); + } + + private void initFields() { + grantInfo_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getGrantInfoCount(); i++) { + if (!getGrantInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < grantInfo_.size(); i++) { + output.writeMessage(1, grantInfo_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < grantInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, grantInfo_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleGrantInfoList} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGrantInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (grantInfoBuilder_ == null) { + grantInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + grantInfoBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList(this); + int from_bitField0_ = bitField0_; + if (grantInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + grantInfo_ = java.util.Collections.unmodifiableList(grantInfo_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.grantInfo_ = grantInfo_; + } else { + result.grantInfo_ = grantInfoBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.getDefaultInstance()) return this; + if (grantInfoBuilder_ == null) { + if (!other.grantInfo_.isEmpty()) { + if (grantInfo_.isEmpty()) { + grantInfo_ = other.grantInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureGrantInfoIsMutable(); + grantInfo_.addAll(other.grantInfo_); + } + onChanged(); + } + } else { + if (!other.grantInfo_.isEmpty()) { + if (grantInfoBuilder_.isEmpty()) { + grantInfoBuilder_.dispose(); + grantInfoBuilder_ = null; + grantInfo_ = other.grantInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + 
grantInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getGrantInfoFieldBuilder() : null; + } else { + grantInfoBuilder_.addAllMessages(other.grantInfo_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getGrantInfoCount(); i++) { + if (!getGrantInfo(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + private java.util.List grantInfo_ = + java.util.Collections.emptyList(); + private void ensureGrantInfoIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + grantInfo_ = new java.util.ArrayList(grantInfo_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder> grantInfoBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public java.util.List getGrantInfoList() { + if (grantInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(grantInfo_); + } else { + return grantInfoBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public int getGrantInfoCount() { + if (grantInfoBuilder_ == null) { + return grantInfo_.size(); + } else { + return grantInfoBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo getGrantInfo(int index) { + if (grantInfoBuilder_ == null) { + return grantInfo_.get(index); + } else { + return grantInfoBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder setGrantInfo( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo value) { + if (grantInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGrantInfoIsMutable(); + grantInfo_.set(index, value); + onChanged(); + } else { + grantInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder setGrantInfo( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder builderForValue) { + if (grantInfoBuilder_ == null) { + ensureGrantInfoIsMutable(); + grantInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + 
grantInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder addGrantInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo value) { + if (grantInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGrantInfoIsMutable(); + grantInfo_.add(value); + onChanged(); + } else { + grantInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder addGrantInfo( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo value) { + if (grantInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGrantInfoIsMutable(); + grantInfo_.add(index, value); + onChanged(); + } else { + grantInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder addGrantInfo( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder builderForValue) { + if (grantInfoBuilder_ == null) { + ensureGrantInfoIsMutable(); + grantInfo_.add(builderForValue.build()); + onChanged(); + } else { + grantInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder addGrantInfo( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder builderForValue) { + if (grantInfoBuilder_ == null) { + ensureGrantInfoIsMutable(); + grantInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + grantInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder addAllGrantInfo( + java.lang.Iterable values) { + if (grantInfoBuilder_ == null) { + ensureGrantInfoIsMutable(); + super.addAll(values, grantInfo_); + onChanged(); + } else { + grantInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder clearGrantInfo() { + if (grantInfoBuilder_ == null) { + grantInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + grantInfoBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder removeGrantInfo(int index) { + if (grantInfoBuilder_ == null) { + ensureGrantInfoIsMutable(); + grantInfo_.remove(index); + onChanged(); + } else { + grantInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder getGrantInfoBuilder( + int index) { + return getGrantInfoFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder getGrantInfoOrBuilder( + int index) { + if (grantInfoBuilder_ == null) { + return grantInfo_.get(index); } else { + return grantInfoBuilder_.getMessageOrBuilder(index); + 
} + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public java.util.List + getGrantInfoOrBuilderList() { + if (grantInfoBuilder_ != null) { + return grantInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(grantInfo_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder addGrantInfoBuilder() { + return getGrantInfoFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder addGrantInfoBuilder( + int index) { + return getGrantInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public java.util.List + getGrantInfoBuilderList() { + return getGrantInfoFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder> + getGrantInfoFieldBuilder() { + if (grantInfoBuilder_ == null) { + grantInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder>( + grantInfo_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + grantInfo_ = null; + } + return grantInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.RoleGrantInfoList) + } + + static { + defaultInstance = new RoleGrantInfoList(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.RoleGrantInfoList) + } + + public interface RoleListOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string role = 1; + /** + * repeated string role = 1; + */ + java.util.List + getRoleList(); + /** + * repeated string role = 1; + */ + int getRoleCount(); + /** + * repeated string role = 1; + */ + java.lang.String getRole(int index); + /** + * repeated string role = 1; + */ + com.google.protobuf.ByteString + getRoleBytes(int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleList} + */ + public static final class RoleList extends + com.google.protobuf.GeneratedMessage + implements RoleListOrBuilder { + // Use RoleList.newBuilder() to construct. 
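[Editorial sketch, not part of this patch.] The RoleGrantInfoList message above, and the RoleList message whose generated code begins here, are thin wrappers around a single repeated field, so the generated API reduces to the usual newBuilder/build/parseFrom round trip. A minimal sketch of that round trip, assuming the protobuf-java 2.5 runtime (toByteArray() comes from the runtime; every other method is visible in the generated code in this patch):

    // Sketch only; assumes an enclosing method that can throw
    // com.google.protobuf.InvalidProtocolBufferException.
    HbaseMetastoreProto.RoleList roles =
        HbaseMetastoreProto.RoleList.newBuilder()
            .addRole("admin")
            .addRole("public")
            .build();
    byte[] cellValue = roles.toByteArray();          // e.g. the value stored in one HBase cell
    HbaseMetastoreProto.RoleList copy =
        HbaseMetastoreProto.RoleList.parseFrom(cellValue);
    assert copy.getRoleCount() == 2 && copy.getRole(0).equals("admin");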
+ private RoleList(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RoleList(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RoleList defaultInstance; + public static RoleList getDefaultInstance() { + return defaultInstance; + } + + public RoleList getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RoleList( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + role_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + role_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + role_ = new com.google.protobuf.UnmodifiableLazyStringList(role_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RoleList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RoleList(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated string role = 1; + public static final int ROLE_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList role_; + /** + * repeated string role = 1; + */ + public java.util.List + getRoleList() { + return role_; + } + /** + * repeated string role = 1; + */ + public int getRoleCount() { + return role_.size(); + } + /** + * repeated string role = 1; + */ + public java.lang.String getRole(int 
index) { + return role_.get(index); + } + /** + * repeated string role = 1; + */ + public com.google.protobuf.ByteString + getRoleBytes(int index) { + return role_.getByteString(index); + } + + private void initFields() { + role_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < role_.size(); i++) { + output.writeBytes(1, role_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < role_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(role_.getByteString(i)); + } + size += dataSize; + size += 1 * getRoleList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleList} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + role_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList 
buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + role_ = new com.google.protobuf.UnmodifiableLazyStringList( + role_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.role_ = role_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.getDefaultInstance()) return this; + if (!other.role_.isEmpty()) { + if (role_.isEmpty()) { + role_ = other.role_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRoleIsMutable(); + role_.addAll(other.role_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string role = 1; + private com.google.protobuf.LazyStringList role_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureRoleIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + role_ = new com.google.protobuf.LazyStringArrayList(role_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string role = 1; + */ + public java.util.List + getRoleList() { + return java.util.Collections.unmodifiableList(role_); + } + /** + * repeated string role = 1; + */ + public int getRoleCount() { + return role_.size(); + } + /** + * repeated string role = 1; + */ + public java.lang.String getRole(int index) { + return role_.get(index); + } + /** + * repeated string role = 1; + */ + public com.google.protobuf.ByteString + getRoleBytes(int index) { + return role_.getByteString(index); + } + /** + * repeated string role = 1; + */ + public Builder setRole( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureRoleIsMutable(); + role_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string role = 1; + */ + public Builder addRole( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureRoleIsMutable(); + role_.add(value); + onChanged(); + return this; + } + /** + * repeated string role = 1; + */ + public Builder addAllRole( + java.lang.Iterable values) { + ensureRoleIsMutable(); + super.addAll(values, role_); + onChanged(); + return this; + } + /** + * repeated string role = 1; + */ + public Builder 
clearRole() { + role_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string role = 1; + */ + public Builder addRoleBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureRoleIsMutable(); + role_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.RoleList) + } + + static { + defaultInstance = new RoleList(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.RoleList) + } + + public interface RoleOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 create_time = 1; + /** + * optional int64 create_time = 1; + */ + boolean hasCreateTime(); + /** + * optional int64 create_time = 1; + */ + long getCreateTime(); + + // optional string owner_name = 2; + /** + * optional string owner_name = 2; + */ + boolean hasOwnerName(); + /** + * optional string owner_name = 2; + */ + java.lang.String getOwnerName(); + /** + * optional string owner_name = 2; + */ + com.google.protobuf.ByteString + getOwnerNameBytes(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Role} + */ + public static final class Role extends + com.google.protobuf.GeneratedMessage + implements RoleOrBuilder { + // Use Role.newBuilder() to construct. + private Role(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Role(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Role defaultInstance; + public static Role getDefaultInstance() { + return defaultInstance; + } + + public Role getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Role( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + createTime_ = input.readInt64(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + ownerName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Role parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Role(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional int64 create_time = 1; + public static final int CREATE_TIME_FIELD_NUMBER = 1; + private long createTime_; + /** + * optional int64 create_time = 1; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 create_time = 1; + */ + public long getCreateTime() { + return createTime_; + } + + // optional string owner_name = 2; + public static final int OWNER_NAME_FIELD_NUMBER = 2; + private java.lang.Object ownerName_; + /** + * optional string owner_name = 2; + */ + public boolean hasOwnerName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string owner_name = 2; + */ + public java.lang.String getOwnerName() { + java.lang.Object ref = ownerName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + ownerName_ = s; + } + return s; + } + } + /** + * optional string owner_name = 2; + */ + public com.google.protobuf.ByteString + getOwnerNameBytes() { + java.lang.Object ref = ownerName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ownerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + createTime_ = 0L; + ownerName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, createTime_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getOwnerNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, createTime_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getOwnerNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + 
return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Role} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + 
getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + createTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + ownerName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.createTime_ = createTime_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.ownerName_ = ownerName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.getDefaultInstance()) return this; + if (other.hasCreateTime()) { + setCreateTime(other.getCreateTime()); + } + if (other.hasOwnerName()) { + bitField0_ |= 0x00000002; + ownerName_ = other.ownerName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } 
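[Editorial sketch, not part of this patch.] The Role message defined above carries only the optional create_time and owner_name fields, suggesting the role name itself lives in the HBase row key rather than in the serialized value. Under the same protobuf-java 2.5 assumptions as the earlier sketch, building, serializing, and re-parsing a Role looks roughly like this:

    // Sketch only; assumes an enclosing method that can throw
    // com.google.protobuf.InvalidProtocolBufferException.
    HbaseMetastoreProto.Role role =
        HbaseMetastoreProto.Role.newBuilder()
            .setCreateTime(System.currentTimeMillis() / 1000)  // int64 seconds
            .setOwnerName("hive")
            .build();
    byte[] bytes = role.toByteArray();
    HbaseMetastoreProto.Role parsed = HbaseMetastoreProto.Role.parseFrom(bytes);
    assert parsed.hasCreateTime() && parsed.getOwnerName().equals("hive");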
+ + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 create_time = 1; + private long createTime_ ; + /** + * optional int64 create_time = 1; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 create_time = 1; + */ + public long getCreateTime() { + return createTime_; + } + /** + * optional int64 create_time = 1; + */ + public Builder setCreateTime(long value) { + bitField0_ |= 0x00000001; + createTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 create_time = 1; + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000001); + createTime_ = 0L; + onChanged(); + return this; + } + + // optional string owner_name = 2; + private java.lang.Object ownerName_ = ""; + /** + * optional string owner_name = 2; + */ + public boolean hasOwnerName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string owner_name = 2; + */ + public java.lang.String getOwnerName() { + java.lang.Object ref = ownerName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + ownerName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string owner_name = 2; + */ + public com.google.protobuf.ByteString + getOwnerNameBytes() { + java.lang.Object ref = ownerName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ownerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string owner_name = 2; + */ + public Builder setOwnerName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + ownerName_ = value; + onChanged(); + return this; + } + /** + * optional string owner_name = 2; + */ + public Builder clearOwnerName() { + bitField0_ = (bitField0_ & ~0x00000002); + ownerName_ = getDefaultInstance().getOwnerName(); + onChanged(); + return this; + } + /** + * optional string owner_name = 2; + */ + public Builder setOwnerNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + ownerName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Role) + } + + static { + defaultInstance = new Role(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Role) + } + + public interface StorageDescriptorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + java.util.List + 
getColsList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getCols(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + int getColsCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + java.util.List + getColsOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getColsOrBuilder( + int index); + + // optional string input_format = 2; + /** + * optional string input_format = 2; + */ + boolean hasInputFormat(); + /** + * optional string input_format = 2; + */ + java.lang.String getInputFormat(); + /** + * optional string input_format = 2; + */ + com.google.protobuf.ByteString + getInputFormatBytes(); + + // optional string output_format = 3; + /** + * optional string output_format = 3; + */ + boolean hasOutputFormat(); + /** + * optional string output_format = 3; + */ + java.lang.String getOutputFormat(); + /** + * optional string output_format = 3; + */ + com.google.protobuf.ByteString + getOutputFormatBytes(); + + // optional bool is_compressed = 4; + /** + * optional bool is_compressed = 4; + */ + boolean hasIsCompressed(); + /** + * optional bool is_compressed = 4; + */ + boolean getIsCompressed(); + + // optional sint32 num_buckets = 5; + /** + * optional sint32 num_buckets = 5; + */ + boolean hasNumBuckets(); + /** + * optional sint32 num_buckets = 5; + */ + int getNumBuckets(); + + // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + boolean hasSerdeInfo(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo getSerdeInfo(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder getSerdeInfoOrBuilder(); + + // repeated string bucket_cols = 7; + /** + * repeated string bucket_cols = 7; + */ + java.util.List + getBucketColsList(); + /** + * repeated string bucket_cols = 7; + */ + int getBucketColsCount(); + /** + * repeated string bucket_cols = 7; + */ + java.lang.String getBucketCols(int index); + /** + * repeated string bucket_cols = 7; + */ + com.google.protobuf.ByteString + getBucketColsBytes(int index); + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + java.util.List + getSortColsList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order getSortCols(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + int getSortColsCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + java.util.List + getSortColsOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; 
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder getSortColsOrBuilder( + int index); + + // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + boolean hasSkewedInfo(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo getSkewedInfo(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder getSkewedInfoOrBuilder(); + + // optional bool stored_as_sub_directories = 10; + /** + * optional bool stored_as_sub_directories = 10; + */ + boolean hasStoredAsSubDirectories(); + /** + * optional bool stored_as_sub_directories = 10; + */ + boolean getStoredAsSubDirectories(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor} + */ + public static final class StorageDescriptor extends + com.google.protobuf.GeneratedMessage + implements StorageDescriptorOrBuilder { + // Use StorageDescriptor.newBuilder() to construct. + private StorageDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StorageDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StorageDescriptor defaultInstance; + public static StorageDescriptor getDefaultInstance() { + return defaultInstance; + } + + public StorageDescriptor getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StorageDescriptor( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + cols_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + cols_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.PARSER, extensionRegistry)); + break; + } + case 18: { + bitField0_ |= 0x00000001; + inputFormat_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000002; + outputFormat_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000004; + isCompressed_ = input.readBool(); + break; + } + case 40: { + bitField0_ |= 0x00000008; + numBuckets_ = input.readSInt32(); + break; + } + case 50: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = 
serdeInfo_.toBuilder(); + } + serdeInfo_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(serdeInfo_); + serdeInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + bucketCols_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000040; + } + bucketCols_.add(input.readBytes()); + break; + } + case 66: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + sortCols_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000080; + } + sortCols_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.PARSER, extensionRegistry)); + break; + } + case 74: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + subBuilder = skewedInfo_.toBuilder(); + } + skewedInfo_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(skewedInfo_); + skewedInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000020; + break; + } + case 80: { + bitField0_ |= 0x00000040; + storedAsSubDirectories_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + cols_ = java.util.Collections.unmodifiableList(cols_); + } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + bucketCols_ = new com.google.protobuf.UnmodifiableLazyStringList(bucketCols_); + } + if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + sortCols_ = java.util.Collections.unmodifiableList(sortCols_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StorageDescriptor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StorageDescriptor(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface OrderOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required 
string column_name = 1; + /** + * required string column_name = 1; + */ + boolean hasColumnName(); + /** + * required string column_name = 1; + */ + java.lang.String getColumnName(); + /** + * required string column_name = 1; + */ + com.google.protobuf.ByteString + getColumnNameBytes(); + + // optional sint32 order = 2 [default = 1]; + /** + * optional sint32 order = 2 [default = 1]; + */ + boolean hasOrder(); + /** + * optional sint32 order = 2 [default = 1]; + */ + int getOrder(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order} + */ + public static final class Order extends + com.google.protobuf.GeneratedMessage + implements OrderOrBuilder { + // Use Order.newBuilder() to construct. + private Order(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Order(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Order defaultInstance; + public static Order getDefaultInstance() { + return defaultInstance; + } + + public Order getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Order( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + columnName_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + order_ = input.readSInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Order parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return new Order(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string column_name = 1; + public static final int COLUMN_NAME_FIELD_NUMBER = 1; + private java.lang.Object columnName_; + /** + * required string column_name = 1; + */ + public boolean hasColumnName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string column_name = 1; + */ + public java.lang.String getColumnName() { + java.lang.Object ref = columnName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + columnName_ = s; + } + return s; + } + } + /** + * required string column_name = 1; + */ + public com.google.protobuf.ByteString + getColumnNameBytes() { + java.lang.Object ref = columnName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + columnName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional sint32 order = 2 [default = 1]; + public static final int ORDER_FIELD_NUMBER = 2; + private int order_; + /** + * optional sint32 order = 2 [default = 1]; + */ + public boolean hasOrder() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional sint32 order = 2 [default = 1]; + */ + public int getOrder() { + return order_; + } + + private void initFields() { + columnName_ = ""; + order_ = 1; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasColumnName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getColumnNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeSInt32(2, order_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getColumnNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeSInt32Size(2, order_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( + com.google.protobuf.ByteString 
data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + columnName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + order_ = 1; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.columnName_ = columnName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.order_ = order_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.getDefaultInstance()) return this; + if (other.hasColumnName()) { + bitField0_ |= 0x00000001; + columnName_ = other.columnName_; + onChanged(); + } + if (other.hasOrder()) { + setOrder(other.getOrder()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if 
(!hasColumnName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string column_name = 1; + private java.lang.Object columnName_ = ""; + /** + * required string column_name = 1; + */ + public boolean hasColumnName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string column_name = 1; + */ + public java.lang.String getColumnName() { + java.lang.Object ref = columnName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + columnName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string column_name = 1; + */ + public com.google.protobuf.ByteString + getColumnNameBytes() { + java.lang.Object ref = columnName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + columnName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string column_name = 1; + */ + public Builder setColumnName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + columnName_ = value; + onChanged(); + return this; + } + /** + * required string column_name = 1; + */ + public Builder clearColumnName() { + bitField0_ = (bitField0_ & ~0x00000001); + columnName_ = getDefaultInstance().getColumnName(); + onChanged(); + return this; + } + /** + * required string column_name = 1; + */ + public Builder setColumnNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + columnName_ = value; + onChanged(); + return this; + } + + // optional sint32 order = 2 [default = 1]; + private int order_ = 1; + /** + * optional sint32 order = 2 [default = 1]; + */ + public boolean hasOrder() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional sint32 order = 2 [default = 1]; + */ + public int getOrder() { + return order_; + } + /** + * optional sint32 order = 2 [default = 1]; + */ + public Builder setOrder(int value) { + bitField0_ |= 0x00000002; + order_ = value; + onChanged(); + return this; + } + /** + * optional sint32 order = 2 [default = 1]; + */ + public Builder clearOrder() { + bitField0_ = (bitField0_ & ~0x00000002); + order_ = 1; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order) + } + + static { + defaultInstance = new Order(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order) + } + + public interface SerDeInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string name = 1; 
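    /*
     * A minimal usage sketch of the generated Order API defined above (illustrative
     * only, not emitted by protoc): the required column_name must be set before
     * build() succeeds, and the optional "order" field falls back to its declared
     * default of 1 when unset.
     *
     *   HbaseMetastoreProto.StorageDescriptor.Order order =
     *       HbaseMetastoreProto.StorageDescriptor.Order.newBuilder()
     *           .setColumnName("ds")   // required string column_name = 1 (example value)
     *           .setOrder(1)           // optional sint32 order = 2 [default = 1]
     *           .build();              // throws UninitializedMessageException if column_name is unset
     *   HbaseMetastoreProto.StorageDescriptor.Order copy =
     *       HbaseMetastoreProto.StorageDescriptor.Order.parseFrom(order.toByteString());
     */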
+ /** + * optional string name = 1; + */ + boolean hasName(); + /** + * optional string name = 1; + */ + java.lang.String getName(); + /** + * optional string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + // optional string serialization_lib = 2; + /** + * optional string serialization_lib = 2; + */ + boolean hasSerializationLib(); + /** + * optional string serialization_lib = 2; + */ + java.lang.String getSerializationLib(); + /** + * optional string serialization_lib = 2; + */ + com.google.protobuf.ByteString + getSerializationLibBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + boolean hasParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo} + */ + public static final class SerDeInfo extends + com.google.protobuf.GeneratedMessage + implements SerDeInfoOrBuilder { + // Use SerDeInfo.newBuilder() to construct. + private SerDeInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SerDeInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SerDeInfo defaultInstance; + public static SerDeInfo getDefaultInstance() { + return defaultInstance; + } + + public SerDeInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SerDeInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + serializationLib_ = input.readBytes(); + break; + } + case 26: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = parameters_.toBuilder(); + } + parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(parameters_); + parameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SerDeInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SerDeInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * optional string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * optional string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string serialization_lib = 2; + public static final int SERIALIZATION_LIB_FIELD_NUMBER = 2; + private java.lang.Object serializationLib_; + /** + * optional string serialization_lib = 2; + */ + public boolean hasSerializationLib() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string serialization_lib = 2; + */ + public java.lang.String getSerializationLib() { + java.lang.Object ref = serializationLib_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + serializationLib_ = s; + } + return s; + } + } + /** + * optional string serialization_lib = 2; + */ + public com.google.protobuf.ByteString + getSerializationLibBytes() { + java.lang.Object ref = serializationLib_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + serializationLib_ = b; + return b; + } else { 
+ return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + public static final int PARAMETERS_FIELD_NUMBER = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + return parameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + return parameters_; + } + + private void initFields() { + name_ = ""; + serializationLib_ = ""; + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasParameters()) { + if (!getParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getSerializationLibBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, parameters_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getSerializationLibBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, parameters_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getParametersFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + serializationLib_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.serializationLib_ = serializationLib_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (parametersBuilder_ == null) { + result.parameters_ = parameters_; + } else { + result.parameters_ = parametersBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance()) return this; + if (other.hasName()) { 
+ bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (other.hasSerializationLib()) { + bitField0_ |= 0x00000002; + serializationLib_ = other.serializationLib_; + onChanged(); + } + if (other.hasParameters()) { + mergeParameters(other.getParameters()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasParameters()) { + if (!getParameters().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string name = 1; + private java.lang.Object name_ = ""; + /** + * optional string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * optional string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * optional string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + // optional string serialization_lib = 2; + private java.lang.Object serializationLib_ = ""; + /** + * optional string serialization_lib = 2; + */ + public boolean hasSerializationLib() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string serialization_lib = 2; + */ + public java.lang.String getSerializationLib() { + java.lang.Object ref = serializationLib_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + serializationLib_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string serialization_lib = 2; + */ + public com.google.protobuf.ByteString + getSerializationLibBytes() { + java.lang.Object ref = serializationLib_; + if (ref 
instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + serializationLib_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string serialization_lib = 2; + */ + public Builder setSerializationLib( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + serializationLib_ = value; + onChanged(); + return this; + } + /** + * optional string serialization_lib = 2; + */ + public Builder clearSerializationLib() { + bitField0_ = (bitField0_ & ~0x00000002); + serializationLib_ = getDefaultInstance().getSerializationLib(); + onChanged(); + return this; + } + /** + * optional string serialization_lib = 2; + */ + public Builder setSerializationLibBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + serializationLib_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + if (parametersBuilder_ == null) { + return parameters_; + } else { + return parametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + parameters_ = value; + onChanged(); + } else { + parametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder setParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (parametersBuilder_ == null) { + parameters_ = builderForValue.build(); + onChanged(); + } else { + parametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + parameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); + } else { + parameters_ = 
value; + } + onChanged(); + } else { + parametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder clearParameters() { + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + if (parametersBuilder_ != null) { + return parametersBuilder_.getMessageOrBuilder(); + } else { + return parameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getParametersFieldBuilder() { + if (parametersBuilder_ == null) { + parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + parameters_, + getParentForChildren(), + isClean()); + parameters_ = null; + } + return parametersBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo) + } + + static { + defaultInstance = new SerDeInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo) + } + + public interface SkewedInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string skewed_col_names = 1; + /** + * repeated string skewed_col_names = 1; + */ + java.util.List + getSkewedColNamesList(); + /** + * repeated string skewed_col_names = 1; + */ + int getSkewedColNamesCount(); + /** + * repeated string skewed_col_names = 1; + */ + java.lang.String getSkewedColNames(int index); + /** + * repeated string skewed_col_names = 1; + */ + com.google.protobuf.ByteString + getSkewedColNamesBytes(int index); + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + java.util.List + getSkewedColValuesList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList getSkewedColValues(int index); + /** + * repeated 
.org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + int getSkewedColValuesCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + java.util.List + getSkewedColValuesOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder getSkewedColValuesOrBuilder( + int index); + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + java.util.List + getSkewedColValueLocationMapsList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap getSkewedColValueLocationMaps(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + int getSkewedColValueLocationMapsCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + java.util.List + getSkewedColValueLocationMapsOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder getSkewedColValueLocationMapsOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo} + */ + public static final class SkewedInfo extends + com.google.protobuf.GeneratedMessage + implements SkewedInfoOrBuilder { + // Use SkewedInfo.newBuilder() to construct. 
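    /*
     * A minimal usage sketch of the generated SerDeInfo API defined earlier in this
     * class (illustrative only; the serde class name is just an example value).
     * setParameters() replaces the nested Parameters message wholesale, while
     * mergeParameters() field-merges into a value that is already present and
     * otherwise simply sets it.
     *
     *   HbaseMetastoreProto.StorageDescriptor.SerDeInfo serde =
     *       HbaseMetastoreProto.StorageDescriptor.SerDeInfo.newBuilder()
     *           .setName("default")
     *           .setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")
     *           .setParameters(HbaseMetastoreProto.Parameters.getDefaultInstance())
     *           .build();
     *   assert serde.hasParameters();
     */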
+ private SkewedInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SkewedInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SkewedInfo defaultInstance; + public static SkewedInfo getDefaultInstance() { + return defaultInstance; + } + + public SkewedInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SkewedInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + skewedColNames_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + skewedColNames_.add(input.readBytes()); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + skewedColValues_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + skewedColValues_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.PARSER, extensionRegistry)); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + skewedColValueLocationMaps_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + skewedColValueLocationMaps_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + skewedColNames_ = new com.google.protobuf.UnmodifiableLazyStringList(skewedColNames_); + } + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + skewedColValues_ = java.util.Collections.unmodifiableList(skewedColValues_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + skewedColValueLocationMaps_ = java.util.Collections.unmodifiableList(skewedColValueLocationMaps_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SkewedInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SkewedInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface SkewedColValueListOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string skewed_col_value = 1; + /** + * repeated string skewed_col_value = 1; + */ + java.util.List + getSkewedColValueList(); + /** + * repeated string skewed_col_value = 1; + */ + int getSkewedColValueCount(); + /** + * repeated string skewed_col_value = 1; + */ + java.lang.String getSkewedColValue(int index); + /** + * repeated string skewed_col_value = 1; + */ + com.google.protobuf.ByteString + getSkewedColValueBytes(int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList} + */ + public static final class SkewedColValueList extends + com.google.protobuf.GeneratedMessage + implements SkewedColValueListOrBuilder { + // Use SkewedColValueList.newBuilder() to construct. + private SkewedColValueList(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SkewedColValueList(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SkewedColValueList defaultInstance; + public static SkewedColValueList getDefaultInstance() { + return defaultInstance; + } + + public SkewedColValueList getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SkewedColValueList( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + skewedColValue_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + skewedColValue_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + 
e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + skewedColValue_ = new com.google.protobuf.UnmodifiableLazyStringList(skewedColValue_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SkewedColValueList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SkewedColValueList(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated string skewed_col_value = 1; + public static final int SKEWED_COL_VALUE_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList skewedColValue_; + /** + * repeated string skewed_col_value = 1; + */ + public java.util.List + getSkewedColValueList() { + return skewedColValue_; + } + /** + * repeated string skewed_col_value = 1; + */ + public int getSkewedColValueCount() { + return skewedColValue_.size(); + } + /** + * repeated string skewed_col_value = 1; + */ + public java.lang.String getSkewedColValue(int index) { + return skewedColValue_.get(index); + } + /** + * repeated string skewed_col_value = 1; + */ + public com.google.protobuf.ByteString + getSkewedColValueBytes(int index) { + return skewedColValue_.getByteString(index); + } + + private void initFields() { + skewedColValue_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < skewedColValue_.size(); i++) { + output.writeBytes(1, skewedColValue_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < skewedColValue_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(skewedColValue_.getByteString(i)); + } + size += dataSize; + size += 1 * getSkewedColValueList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private 
static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + skewedColValue_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + skewedColValue_ = new 
com.google.protobuf.UnmodifiableLazyStringList( + skewedColValue_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.skewedColValue_ = skewedColValue_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.getDefaultInstance()) return this; + if (!other.skewedColValue_.isEmpty()) { + if (skewedColValue_.isEmpty()) { + skewedColValue_ = other.skewedColValue_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureSkewedColValueIsMutable(); + skewedColValue_.addAll(other.skewedColValue_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string skewed_col_value = 1; + private com.google.protobuf.LazyStringList skewedColValue_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureSkewedColValueIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + skewedColValue_ = new com.google.protobuf.LazyStringArrayList(skewedColValue_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string skewed_col_value = 1; + */ + public java.util.List + getSkewedColValueList() { + return java.util.Collections.unmodifiableList(skewedColValue_); + } + /** + * repeated string skewed_col_value = 1; + */ + public int getSkewedColValueCount() { + return skewedColValue_.size(); + } + /** + * repeated string skewed_col_value = 1; + */ + public java.lang.String getSkewedColValue(int index) { + return skewedColValue_.get(index); + } + /** + * repeated string skewed_col_value = 1; + */ + public com.google.protobuf.ByteString + getSkewedColValueBytes(int index) { + return skewedColValue_.getByteString(index); + } + /** + * repeated string skewed_col_value = 1; + */ + public Builder setSkewedColValue( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValueIsMutable(); + skewedColValue_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string skewed_col_value = 1; + */ + public Builder addSkewedColValue( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + 
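+        // Builder setters are copy-on-write: ensureSkewedColValueIsMutable() swaps the
+        // shared immutable list for a fresh LazyStringArrayList before the new value is added,
+        // and onChanged() marks the builder dirty for any parent builders.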
ensureSkewedColValueIsMutable(); + skewedColValue_.add(value); + onChanged(); + return this; + } + /** + * repeated string skewed_col_value = 1; + */ + public Builder addAllSkewedColValue( + java.lang.Iterable values) { + ensureSkewedColValueIsMutable(); + super.addAll(values, skewedColValue_); + onChanged(); + return this; + } + /** + * repeated string skewed_col_value = 1; + */ + public Builder clearSkewedColValue() { + skewedColValue_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string skewed_col_value = 1; + */ + public Builder addSkewedColValueBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValueIsMutable(); + skewedColValue_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList) + } + + static { + defaultInstance = new SkewedColValueList(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList) + } + + public interface SkewedColValueLocationMapOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string key = 1; + /** + * repeated string key = 1; + */ + java.util.List + getKeyList(); + /** + * repeated string key = 1; + */ + int getKeyCount(); + /** + * repeated string key = 1; + */ + java.lang.String getKey(int index); + /** + * repeated string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(int index); + + // required string value = 2; + /** + * required string value = 2; + */ + boolean hasValue(); + /** + * required string value = 2; + */ + java.lang.String getValue(); + /** + * required string value = 2; + */ + com.google.protobuf.ByteString + getValueBytes(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap} + */ + public static final class SkewedColValueLocationMap extends + com.google.protobuf.GeneratedMessage + implements SkewedColValueLocationMapOrBuilder { + // Use SkewedColValueLocationMap.newBuilder() to construct. 
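+      // SkewedColValueLocationMap models one entry of the skewed-value-to-location map:
+      // the repeated "key" field (field 1) carries one list of skewed column values and the
+      // required "value" field (field 2) carries the corresponding location string,
+      // presumably because a map keyed by a list cannot be expressed directly in proto2.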
+ private SkewedColValueLocationMap(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SkewedColValueLocationMap(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SkewedColValueLocationMap defaultInstance; + public static SkewedColValueLocationMap getDefaultInstance() { + return defaultInstance; + } + + public SkewedColValueLocationMap getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SkewedColValueLocationMap( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + key_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + key_.add(input.readBytes()); + break; + } + case 18: { + bitField0_ |= 0x00000001; + value_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + key_ = new com.google.protobuf.UnmodifiableLazyStringList(key_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SkewedColValueLocationMap parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SkewedColValueLocationMap(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int 
bitField0_; + // repeated string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList key_; + /** + * repeated string key = 1; + */ + public java.util.List + getKeyList() { + return key_; + } + /** + * repeated string key = 1; + */ + public int getKeyCount() { + return key_.size(); + } + /** + * repeated string key = 1; + */ + public java.lang.String getKey(int index) { + return key_.get(index); + } + /** + * repeated string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes(int index) { + return key_.getByteString(index); + } + + // required string value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private java.lang.Object value_; + /** + * required string value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string value = 2; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + value_ = s; + } + return s; + } + } + /** + * required string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + key_ = com.google.protobuf.LazyStringArrayList.EMPTY; + value_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < key_.size(); i++) { + output.writeBytes(1, key_.getByteString(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(2, getValueBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < key_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(key_.getByteString(i)); + } + size += dataSize; + size += 1 * getKeyList().size(); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getValueBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + value_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + key_ = new com.google.protobuf.UnmodifiableLazyStringList( + key_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000001; + } + 
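+          // buildPartial() has already frozen the key list as an UnmodifiableLazyStringList and
+          // remapped the builder's has-bit for "value" (0x00000002) to the message's bit
+          // (0x00000001); the value itself and the compacted bitField0_ are stored next.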
result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.getDefaultInstance()) return this; + if (!other.key_.isEmpty()) { + if (key_.isEmpty()) { + key_ = other.key_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureKeyIsMutable(); + key_.addAll(other.key_); + } + onChanged(); + } + if (other.hasValue()) { + bitField0_ |= 0x00000002; + value_ = other.value_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasValue()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string key = 1; + private com.google.protobuf.LazyStringList key_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureKeyIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + key_ = new com.google.protobuf.LazyStringArrayList(key_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string key = 1; + */ + public java.util.List + getKeyList() { + return java.util.Collections.unmodifiableList(key_); + } + /** + * repeated string key = 1; + */ + public int getKeyCount() { + return key_.size(); + } + /** + * repeated string key = 1; + */ + public java.lang.String getKey(int index) { + return key_.get(index); + } + /** + * repeated string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes(int index) { + return key_.getByteString(index); + } + /** + * repeated string key = 1; + */ + public Builder setKey( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeyIsMutable(); + key_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string key = 1; + */ + public Builder addKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeyIsMutable(); + key_.add(value); + onChanged(); + return this; + } + /** + * repeated string key = 1; + */ + public Builder addAllKey( + java.lang.Iterable values) { + ensureKeyIsMutable(); + super.addAll(values, key_); + onChanged(); + return this; + } + /** + * repeated 
string key = 1; + */ + public Builder clearKey() { + key_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string key = 1; + */ + public Builder addKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeyIsMutable(); + key_.add(value); + onChanged(); + return this; + } + + // required string value = 2; + private java.lang.Object value_ = ""; + /** + * required string value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string value = 2; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + value_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string value = 2; + */ + public Builder setValue( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + /** + * required string value = 2; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = getDefaultInstance().getValue(); + onChanged(); + return this; + } + /** + * required string value = 2; + */ + public Builder setValueBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap) + } + + static { + defaultInstance = new SkewedColValueLocationMap(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap) + } + + // repeated string skewed_col_names = 1; + public static final int SKEWED_COL_NAMES_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList skewedColNames_; + /** + * repeated string skewed_col_names = 1; + */ + public java.util.List + getSkewedColNamesList() { + return skewedColNames_; + } + /** + * repeated string skewed_col_names = 1; + */ + public int getSkewedColNamesCount() { + return skewedColNames_.size(); + } + /** + * repeated string skewed_col_names = 1; + */ + public java.lang.String getSkewedColNames(int index) { + return skewedColNames_.get(index); + } + /** + * repeated string skewed_col_names = 1; + */ + public com.google.protobuf.ByteString + getSkewedColNamesBytes(int index) { + return skewedColNames_.getByteString(index); + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + public static final int SKEWED_COL_VALUES_FIELD_NUMBER = 2; + private java.util.List skewedColValues_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public 
java.util.List getSkewedColValuesList() { + return skewedColValues_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public java.util.List + getSkewedColValuesOrBuilderList() { + return skewedColValues_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public int getSkewedColValuesCount() { + return skewedColValues_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList getSkewedColValues(int index) { + return skewedColValues_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder getSkewedColValuesOrBuilder( + int index) { + return skewedColValues_.get(index); + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + public static final int SKEWED_COL_VALUE_LOCATION_MAPS_FIELD_NUMBER = 3; + private java.util.List skewedColValueLocationMaps_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public java.util.List getSkewedColValueLocationMapsList() { + return skewedColValueLocationMaps_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public java.util.List + getSkewedColValueLocationMapsOrBuilderList() { + return skewedColValueLocationMaps_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public int getSkewedColValueLocationMapsCount() { + return skewedColValueLocationMaps_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap getSkewedColValueLocationMaps(int index) { + return skewedColValueLocationMaps_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder getSkewedColValueLocationMapsOrBuilder( + int index) { + return skewedColValueLocationMaps_.get(index); + } + + private void initFields() { + skewedColNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + skewedColValues_ = java.util.Collections.emptyList(); + skewedColValueLocationMaps_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getSkewedColValueLocationMapsCount(); i++) { + if (!getSkewedColValueLocationMaps(i).isInitialized()) { 
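+          // A single entry missing its required "value" (location) field marks the whole
+          // SkewedInfo message uninitialized.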
+ memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < skewedColNames_.size(); i++) { + output.writeBytes(1, skewedColNames_.getByteString(i)); + } + for (int i = 0; i < skewedColValues_.size(); i++) { + output.writeMessage(2, skewedColValues_.get(i)); + } + for (int i = 0; i < skewedColValueLocationMaps_.size(); i++) { + output.writeMessage(3, skewedColValueLocationMaps_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < skewedColNames_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(skewedColNames_.getByteString(i)); + } + size += dataSize; + size += 1 * getSkewedColNamesList().size(); + } + for (int i = 0; i < skewedColValues_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, skewedColValues_.get(i)); + } + for (int i = 0; i < skewedColValueLocationMaps_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, skewedColValueLocationMaps_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public 
static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getSkewedColValuesFieldBuilder(); + getSkewedColValueLocationMapsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + skewedColNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + if (skewedColValuesBuilder_ == null) { + skewedColValues_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + skewedColValuesBuilder_.clear(); + } + if (skewedColValueLocationMapsBuilder_ == null) { + skewedColValueLocationMaps_ = java.util.Collections.emptyList(); 
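+          // clear() resets all three fields (skewed_col_names, skewed_col_values,
+          // skewed_col_value_location_maps) and drops their presence bits 0x1, 0x2 and 0x4.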
+ bitField0_ = (bitField0_ & ~0x00000004); + } else { + skewedColValueLocationMapsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + skewedColNames_ = new com.google.protobuf.UnmodifiableLazyStringList( + skewedColNames_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.skewedColNames_ = skewedColNames_; + if (skewedColValuesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + skewedColValues_ = java.util.Collections.unmodifiableList(skewedColValues_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.skewedColValues_ = skewedColValues_; + } else { + result.skewedColValues_ = skewedColValuesBuilder_.build(); + } + if (skewedColValueLocationMapsBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + skewedColValueLocationMaps_ = java.util.Collections.unmodifiableList(skewedColValueLocationMaps_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.skewedColValueLocationMaps_ = skewedColValueLocationMaps_; + } else { + result.skewedColValueLocationMaps_ = skewedColValueLocationMapsBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance()) return this; + if (!other.skewedColNames_.isEmpty()) { + if (skewedColNames_.isEmpty()) { + skewedColNames_ = other.skewedColNames_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureSkewedColNamesIsMutable(); + skewedColNames_.addAll(other.skewedColNames_); + } + onChanged(); + } + if (skewedColValuesBuilder_ == null) { + if (!other.skewedColValues_.isEmpty()) { + if (skewedColValues_.isEmpty()) { + skewedColValues_ = other.skewedColValues_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + 
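+            // When both sides already have skewed_col_values, merge appends the other
+            // message's entries to this builder's list rather than overwriting it.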
ensureSkewedColValuesIsMutable(); + skewedColValues_.addAll(other.skewedColValues_); + } + onChanged(); + } + } else { + if (!other.skewedColValues_.isEmpty()) { + if (skewedColValuesBuilder_.isEmpty()) { + skewedColValuesBuilder_.dispose(); + skewedColValuesBuilder_ = null; + skewedColValues_ = other.skewedColValues_; + bitField0_ = (bitField0_ & ~0x00000002); + skewedColValuesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getSkewedColValuesFieldBuilder() : null; + } else { + skewedColValuesBuilder_.addAllMessages(other.skewedColValues_); + } + } + } + if (skewedColValueLocationMapsBuilder_ == null) { + if (!other.skewedColValueLocationMaps_.isEmpty()) { + if (skewedColValueLocationMaps_.isEmpty()) { + skewedColValueLocationMaps_ = other.skewedColValueLocationMaps_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.addAll(other.skewedColValueLocationMaps_); + } + onChanged(); + } + } else { + if (!other.skewedColValueLocationMaps_.isEmpty()) { + if (skewedColValueLocationMapsBuilder_.isEmpty()) { + skewedColValueLocationMapsBuilder_.dispose(); + skewedColValueLocationMapsBuilder_ = null; + skewedColValueLocationMaps_ = other.skewedColValueLocationMaps_; + bitField0_ = (bitField0_ & ~0x00000004); + skewedColValueLocationMapsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getSkewedColValueLocationMapsFieldBuilder() : null; + } else { + skewedColValueLocationMapsBuilder_.addAllMessages(other.skewedColValueLocationMaps_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getSkewedColValueLocationMapsCount(); i++) { + if (!getSkewedColValueLocationMaps(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string skewed_col_names = 1; + private com.google.protobuf.LazyStringList skewedColNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureSkewedColNamesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + skewedColNames_ = new com.google.protobuf.LazyStringArrayList(skewedColNames_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string skewed_col_names = 1; + */ + public java.util.List + getSkewedColNamesList() { + return java.util.Collections.unmodifiableList(skewedColNames_); + } + /** + * repeated string skewed_col_names = 1; + */ + public int getSkewedColNamesCount() { + return skewedColNames_.size(); + } + /** + * repeated string skewed_col_names = 1; + */ + public java.lang.String getSkewedColNames(int index) { + return skewedColNames_.get(index); + } + /** + * repeated string skewed_col_names = 1; + */ + public com.google.protobuf.ByteString + getSkewedColNamesBytes(int index) { + return 
skewedColNames_.getByteString(index); + } + /** + * repeated string skewed_col_names = 1; + */ + public Builder setSkewedColNames( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColNamesIsMutable(); + skewedColNames_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string skewed_col_names = 1; + */ + public Builder addSkewedColNames( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColNamesIsMutable(); + skewedColNames_.add(value); + onChanged(); + return this; + } + /** + * repeated string skewed_col_names = 1; + */ + public Builder addAllSkewedColNames( + java.lang.Iterable values) { + ensureSkewedColNamesIsMutable(); + super.addAll(values, skewedColNames_); + onChanged(); + return this; + } + /** + * repeated string skewed_col_names = 1; + */ + public Builder clearSkewedColNames() { + skewedColNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string skewed_col_names = 1; + */ + public Builder addSkewedColNamesBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColNamesIsMutable(); + skewedColNames_.add(value); + onChanged(); + return this; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + private java.util.List skewedColValues_ = + java.util.Collections.emptyList(); + private void ensureSkewedColValuesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + skewedColValues_ = new java.util.ArrayList(skewedColValues_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder> skewedColValuesBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public java.util.List getSkewedColValuesList() { + if (skewedColValuesBuilder_ == null) { + return java.util.Collections.unmodifiableList(skewedColValues_); + } else { + return skewedColValuesBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public int getSkewedColValuesCount() { + if (skewedColValuesBuilder_ == null) { + return skewedColValues_.size(); + } else { + return skewedColValuesBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList getSkewedColValues(int index) { + if (skewedColValuesBuilder_ == null) { + return skewedColValues_.get(index); + } else { + return skewedColValuesBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder setSkewedColValues( + int index, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList value) { + if (skewedColValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValuesIsMutable(); + skewedColValues_.set(index, value); + onChanged(); + } else { + skewedColValuesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder setSkewedColValues( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder builderForValue) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.set(index, builderForValue.build()); + onChanged(); + } else { + skewedColValuesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder addSkewedColValues(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList value) { + if (skewedColValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(value); + onChanged(); + } else { + skewedColValuesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder addSkewedColValues( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList value) { + if (skewedColValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(index, value); + onChanged(); + } else { + skewedColValuesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder addSkewedColValues( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder builderForValue) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(builderForValue.build()); + onChanged(); + } else { + skewedColValuesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder addSkewedColValues( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder builderForValue) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(index, builderForValue.build()); + onChanged(); + } else { + skewedColValuesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder addAllSkewedColValues( + java.lang.Iterable values) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + super.addAll(values, skewedColValues_); + onChanged(); 
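+          // With no RepeatedFieldBuilder attached, values go straight into the local list;
+          // once getSkewedColValuesFieldBuilder() has been called, all mutations are
+          // delegated to skewedColValuesBuilder_ instead (else branch below).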
+ } else { + skewedColValuesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder clearSkewedColValues() { + if (skewedColValuesBuilder_ == null) { + skewedColValues_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + skewedColValuesBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder removeSkewedColValues(int index) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.remove(index); + onChanged(); + } else { + skewedColValuesBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder getSkewedColValuesBuilder( + int index) { + return getSkewedColValuesFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder getSkewedColValuesOrBuilder( + int index) { + if (skewedColValuesBuilder_ == null) { + return skewedColValues_.get(index); } else { + return skewedColValuesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public java.util.List + getSkewedColValuesOrBuilderList() { + if (skewedColValuesBuilder_ != null) { + return skewedColValuesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(skewedColValues_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder addSkewedColValuesBuilder() { + return getSkewedColValuesFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder addSkewedColValuesBuilder( + int index) { + return getSkewedColValuesFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public java.util.List + getSkewedColValuesBuilderList() { + return getSkewedColValuesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder> + getSkewedColValuesFieldBuilder() { + if (skewedColValuesBuilder_ == null) { + skewedColValuesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder>( + skewedColValues_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + skewedColValues_ = null; + } + return skewedColValuesBuilder_; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + private java.util.List skewedColValueLocationMaps_ = + java.util.Collections.emptyList(); + private void ensureSkewedColValueLocationMapsIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + skewedColValueLocationMaps_ = new java.util.ArrayList(skewedColValueLocationMaps_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder> skewedColValueLocationMapsBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public java.util.List getSkewedColValueLocationMapsList() { + if (skewedColValueLocationMapsBuilder_ == null) { + return java.util.Collections.unmodifiableList(skewedColValueLocationMaps_); + } else { + return skewedColValueLocationMapsBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public int getSkewedColValueLocationMapsCount() { + if (skewedColValueLocationMapsBuilder_ == null) { + return skewedColValueLocationMaps_.size(); + } else { + return skewedColValueLocationMapsBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap getSkewedColValueLocationMaps(int index) { + if (skewedColValueLocationMapsBuilder_ == null) { + return skewedColValueLocationMaps_.get(index); + } else { + return skewedColValueLocationMapsBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder setSkewedColValueLocationMaps( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap value) { + if (skewedColValueLocationMapsBuilder_ == null) { + if (value == 
null) { + throw new NullPointerException(); + } + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.set(index, value); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder setSkewedColValueLocationMaps( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder builderForValue) { + if (skewedColValueLocationMapsBuilder_ == null) { + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.set(index, builderForValue.build()); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder addSkewedColValueLocationMaps(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap value) { + if (skewedColValueLocationMapsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.add(value); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder addSkewedColValueLocationMaps( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap value) { + if (skewedColValueLocationMapsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.add(index, value); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder addSkewedColValueLocationMaps( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder builderForValue) { + if (skewedColValueLocationMapsBuilder_ == null) { + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.add(builderForValue.build()); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder addSkewedColValueLocationMaps( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder builderForValue) { + if (skewedColValueLocationMapsBuilder_ == null) { + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.add(index, builderForValue.build()); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated 
.org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder addAllSkewedColValueLocationMaps( + java.lang.Iterable values) { + if (skewedColValueLocationMapsBuilder_ == null) { + ensureSkewedColValueLocationMapsIsMutable(); + super.addAll(values, skewedColValueLocationMaps_); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder clearSkewedColValueLocationMaps() { + if (skewedColValueLocationMapsBuilder_ == null) { + skewedColValueLocationMaps_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder removeSkewedColValueLocationMaps(int index) { + if (skewedColValueLocationMapsBuilder_ == null) { + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.remove(index); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder getSkewedColValueLocationMapsBuilder( + int index) { + return getSkewedColValueLocationMapsFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder getSkewedColValueLocationMapsOrBuilder( + int index) { + if (skewedColValueLocationMapsBuilder_ == null) { + return skewedColValueLocationMaps_.get(index); } else { + return skewedColValueLocationMapsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public java.util.List + getSkewedColValueLocationMapsOrBuilderList() { + if (skewedColValueLocationMapsBuilder_ != null) { + return skewedColValueLocationMapsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(skewedColValueLocationMaps_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder addSkewedColValueLocationMapsBuilder() { + return getSkewedColValueLocationMapsFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder addSkewedColValueLocationMapsBuilder( + int index) { + return getSkewedColValueLocationMapsFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public java.util.List + getSkewedColValueLocationMapsBuilderList() { + return getSkewedColValueLocationMapsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder> + getSkewedColValueLocationMapsFieldBuilder() { + if (skewedColValueLocationMapsBuilder_ == null) { + skewedColValueLocationMapsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder>( + skewedColValueLocationMaps_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + skewedColValueLocationMaps_ = null; + } + return skewedColValueLocationMapsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo) + } + + static { + defaultInstance = new SkewedInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo) + } + + private int bitField0_; + // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + public static final int COLS_FIELD_NUMBER = 1; + private java.util.List cols_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public java.util.List getColsList() { + return cols_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public java.util.List + getColsOrBuilderList() { + return cols_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public int getColsCount() { + return cols_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getCols(int index) { + return cols_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getColsOrBuilder( + int index) { + return cols_.get(index); + } + + // optional string input_format = 2; + public static final int INPUT_FORMAT_FIELD_NUMBER = 2; + private java.lang.Object inputFormat_; + /** + * optional string input_format = 2; + */ + public boolean hasInputFormat() { + return ((bitField0_ & 0x00000001) == 0x00000001); 
+ } + /** + * optional string input_format = 2; + */ + public java.lang.String getInputFormat() { + java.lang.Object ref = inputFormat_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + inputFormat_ = s; + } + return s; + } + } + /** + * optional string input_format = 2; + */ + public com.google.protobuf.ByteString + getInputFormatBytes() { + java.lang.Object ref = inputFormat_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + inputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string output_format = 3; + public static final int OUTPUT_FORMAT_FIELD_NUMBER = 3; + private java.lang.Object outputFormat_; + /** + * optional string output_format = 3; + */ + public boolean hasOutputFormat() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string output_format = 3; + */ + public java.lang.String getOutputFormat() { + java.lang.Object ref = outputFormat_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + outputFormat_ = s; + } + return s; + } + } + /** + * optional string output_format = 3; + */ + public com.google.protobuf.ByteString + getOutputFormatBytes() { + java.lang.Object ref = outputFormat_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + outputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional bool is_compressed = 4; + public static final int IS_COMPRESSED_FIELD_NUMBER = 4; + private boolean isCompressed_; + /** + * optional bool is_compressed = 4; + */ + public boolean hasIsCompressed() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool is_compressed = 4; + */ + public boolean getIsCompressed() { + return isCompressed_; + } + + // optional sint32 num_buckets = 5; + public static final int NUM_BUCKETS_FIELD_NUMBER = 5; + private int numBuckets_; + /** + * optional sint32 num_buckets = 5; + */ + public boolean hasNumBuckets() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional sint32 num_buckets = 5; + */ + public int getNumBuckets() { + return numBuckets_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + public static final int SERDE_INFO_FIELD_NUMBER = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo serdeInfo_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public boolean hasSerdeInfo() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo getSerdeInfo() { + return serdeInfo_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder getSerdeInfoOrBuilder() { + return serdeInfo_; + } + + // repeated string bucket_cols = 7; + public static final int BUCKET_COLS_FIELD_NUMBER = 7; + private com.google.protobuf.LazyStringList bucketCols_; + /** + * repeated string bucket_cols = 7; + */ + public java.util.List + getBucketColsList() { + return bucketCols_; + } + /** + * repeated string bucket_cols = 7; + */ + public int getBucketColsCount() { + return bucketCols_.size(); + } + /** + * repeated string bucket_cols = 7; + */ + public java.lang.String getBucketCols(int index) { + return bucketCols_.get(index); + } + /** + * repeated string bucket_cols = 7; + */ + public com.google.protobuf.ByteString + getBucketColsBytes(int index) { + return bucketCols_.getByteString(index); + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + public static final int SORT_COLS_FIELD_NUMBER = 8; + private java.util.List sortCols_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public java.util.List getSortColsList() { + return sortCols_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public java.util.List + getSortColsOrBuilderList() { + return sortCols_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public int getSortColsCount() { + return sortCols_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order getSortCols(int index) { + return sortCols_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder getSortColsOrBuilder( + int index) { + return sortCols_.get(index); + } + + // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + public static final int SKEWED_INFO_FIELD_NUMBER = 9; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo skewedInfo_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public boolean hasSkewedInfo() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo getSkewedInfo() { + return skewedInfo_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder getSkewedInfoOrBuilder() { + return skewedInfo_; + } + + // optional bool stored_as_sub_directories = 10; + public static final int STORED_AS_SUB_DIRECTORIES_FIELD_NUMBER = 10; + private boolean storedAsSubDirectories_; + /** + * optional bool stored_as_sub_directories = 10; + */ + public boolean hasStoredAsSubDirectories() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional bool stored_as_sub_directories = 10; + */ + public boolean getStoredAsSubDirectories() { + return storedAsSubDirectories_; + } + + 
private void initFields() { + cols_ = java.util.Collections.emptyList(); + inputFormat_ = ""; + outputFormat_ = ""; + isCompressed_ = false; + numBuckets_ = 0; + serdeInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); + bucketCols_ = com.google.protobuf.LazyStringArrayList.EMPTY; + sortCols_ = java.util.Collections.emptyList(); + skewedInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); + storedAsSubDirectories_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getColsCount(); i++) { + if (!getCols(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasSerdeInfo()) { + if (!getSerdeInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getSortColsCount(); i++) { + if (!getSortCols(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasSkewedInfo()) { + if (!getSkewedInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < cols_.size(); i++) { + output.writeMessage(1, cols_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(2, getInputFormatBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(3, getOutputFormatBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(4, isCompressed_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeSInt32(5, numBuckets_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(6, serdeInfo_); + } + for (int i = 0; i < bucketCols_.size(); i++) { + output.writeBytes(7, bucketCols_.getByteString(i)); + } + for (int i = 0; i < sortCols_.size(); i++) { + output.writeMessage(8, sortCols_.get(i)); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeMessage(9, skewedInfo_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeBool(10, storedAsSubDirectories_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < cols_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, cols_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getInputFormatBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getOutputFormatBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(4, isCompressed_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeSInt32Size(5, numBuckets_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, serdeInfo_); + } + { + int dataSize = 0; + for (int i = 0; i < bucketCols_.size(); i++) { + dataSize 
+= com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(bucketCols_.getByteString(i)); + } + size += dataSize; + size += 1 * getBucketColsList().size(); + } + for (int i = 0; i < sortCols_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, sortCols_.get(i)); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, skewedInfo_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(10, storedAsSubDirectories_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return 
Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getColsFieldBuilder(); + getSerdeInfoFieldBuilder(); + getSortColsFieldBuilder(); + getSkewedInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (colsBuilder_ == null) { + cols_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + colsBuilder_.clear(); + } + inputFormat_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + outputFormat_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + isCompressed_ = false; + bitField0_ = (bitField0_ & ~0x00000008); + numBuckets_ = 0; + bitField0_ = (bitField0_ & ~0x00000010); + if (serdeInfoBuilder_ == null) { + serdeInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); + } else { + serdeInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + bucketCols_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000040); + if (sortColsBuilder_ == null) { + sortCols_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + } else { + sortColsBuilder_.clear(); + } + if (skewedInfoBuilder_ == null) { + skewedInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); + } else { + skewedInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + storedAsSubDirectories_ = false; + bitField0_ = (bitField0_ & ~0x00000200); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + 
public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (colsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + cols_ = java.util.Collections.unmodifiableList(cols_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.cols_ = cols_; + } else { + result.cols_ = colsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000001; + } + result.inputFormat_ = inputFormat_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.outputFormat_ = outputFormat_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; + } + result.isCompressed_ = isCompressed_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.numBuckets_ = numBuckets_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000010; + } + if (serdeInfoBuilder_ == null) { + result.serdeInfo_ = serdeInfo_; + } else { + result.serdeInfo_ = serdeInfoBuilder_.build(); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + bucketCols_ = new com.google.protobuf.UnmodifiableLazyStringList( + bucketCols_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.bucketCols_ = bucketCols_; + if (sortColsBuilder_ == null) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { + sortCols_ = java.util.Collections.unmodifiableList(sortCols_); + bitField0_ = (bitField0_ & ~0x00000080); + } + result.sortCols_ = sortCols_; + } else { + result.sortCols_ = sortColsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000020; + } + if (skewedInfoBuilder_ == null) { + result.skewedInfo_ = skewedInfo_; + } else { + result.skewedInfo_ = skewedInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000040; + } + result.storedAsSubDirectories_ = storedAsSubDirectories_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor other) { + if (other == 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.getDefaultInstance()) return this; + if (colsBuilder_ == null) { + if (!other.cols_.isEmpty()) { + if (cols_.isEmpty()) { + cols_ = other.cols_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureColsIsMutable(); + cols_.addAll(other.cols_); + } + onChanged(); + } + } else { + if (!other.cols_.isEmpty()) { + if (colsBuilder_.isEmpty()) { + colsBuilder_.dispose(); + colsBuilder_ = null; + cols_ = other.cols_; + bitField0_ = (bitField0_ & ~0x00000001); + colsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getColsFieldBuilder() : null; + } else { + colsBuilder_.addAllMessages(other.cols_); + } + } + } + if (other.hasInputFormat()) { + bitField0_ |= 0x00000002; + inputFormat_ = other.inputFormat_; + onChanged(); + } + if (other.hasOutputFormat()) { + bitField0_ |= 0x00000004; + outputFormat_ = other.outputFormat_; + onChanged(); + } + if (other.hasIsCompressed()) { + setIsCompressed(other.getIsCompressed()); + } + if (other.hasNumBuckets()) { + setNumBuckets(other.getNumBuckets()); + } + if (other.hasSerdeInfo()) { + mergeSerdeInfo(other.getSerdeInfo()); + } + if (!other.bucketCols_.isEmpty()) { + if (bucketCols_.isEmpty()) { + bucketCols_ = other.bucketCols_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureBucketColsIsMutable(); + bucketCols_.addAll(other.bucketCols_); + } + onChanged(); + } + if (sortColsBuilder_ == null) { + if (!other.sortCols_.isEmpty()) { + if (sortCols_.isEmpty()) { + sortCols_ = other.sortCols_; + bitField0_ = (bitField0_ & ~0x00000080); + } else { + ensureSortColsIsMutable(); + sortCols_.addAll(other.sortCols_); + } + onChanged(); + } + } else { + if (!other.sortCols_.isEmpty()) { + if (sortColsBuilder_.isEmpty()) { + sortColsBuilder_.dispose(); + sortColsBuilder_ = null; + sortCols_ = other.sortCols_; + bitField0_ = (bitField0_ & ~0x00000080); + sortColsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getSortColsFieldBuilder() : null; + } else { + sortColsBuilder_.addAllMessages(other.sortCols_); + } + } + } + if (other.hasSkewedInfo()) { + mergeSkewedInfo(other.getSkewedInfo()); + } + if (other.hasStoredAsSubDirectories()) { + setStoredAsSubDirectories(other.getStoredAsSubDirectories()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getColsCount(); i++) { + if (!getCols(i).isInitialized()) { + + return false; + } + } + if (hasSerdeInfo()) { + if (!getSerdeInfo().isInitialized()) { + + return false; + } + } + for (int i = 0; i < getSortColsCount(); i++) { + if (!getSortCols(i).isInitialized()) { + + return false; + } + } + if (hasSkewedInfo()) { + if (!getSkewedInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + private java.util.List cols_ = + java.util.Collections.emptyList(); + private void ensureColsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + cols_ = new java.util.ArrayList(cols_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder> colsBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public java.util.List getColsList() { + if (colsBuilder_ == null) { + return java.util.Collections.unmodifiableList(cols_); + } else { + return colsBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public int getColsCount() { + if (colsBuilder_ == null) { + return cols_.size(); + } else { + return colsBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getCols(int index) { + if (colsBuilder_ == null) { + return cols_.get(index); + } else { + return colsBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder setCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { + if (colsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColsIsMutable(); + cols_.set(index, value); + onChanged(); + } else { + colsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder setCols( + int index, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { + if (colsBuilder_ == null) { + ensureColsIsMutable(); + cols_.set(index, builderForValue.build()); + onChanged(); + } else { + colsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder addCols(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { + if (colsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColsIsMutable(); + cols_.add(value); + onChanged(); + } else { + colsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder addCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { + if (colsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColsIsMutable(); + cols_.add(index, value); + onChanged(); + } else { + colsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder addCols( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { + if (colsBuilder_ == null) { + ensureColsIsMutable(); + cols_.add(builderForValue.build()); + onChanged(); + } else { + colsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder addCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { + if (colsBuilder_ == null) { + ensureColsIsMutable(); + cols_.add(index, builderForValue.build()); + onChanged(); + } else { + colsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder addAllCols( + java.lang.Iterable values) { + if (colsBuilder_ == null) { + ensureColsIsMutable(); + super.addAll(values, cols_); + onChanged(); + } else { + colsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder clearCols() { + if (colsBuilder_ == null) { + cols_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + colsBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder removeCols(int index) { + if (colsBuilder_ == null) { + ensureColsIsMutable(); + cols_.remove(index); + onChanged(); + } else { + colsBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder getColsBuilder( + int index) { + return getColsFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getColsOrBuilder( + int index) { + if (colsBuilder_ == null) { + return cols_.get(index); } else { + return colsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated 
.org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public java.util.List + getColsOrBuilderList() { + if (colsBuilder_ != null) { + return colsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(cols_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder addColsBuilder() { + return getColsFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder addColsBuilder( + int index) { + return getColsFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public java.util.List + getColsBuilderList() { + return getColsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder> + getColsFieldBuilder() { + if (colsBuilder_ == null) { + colsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder>( + cols_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + cols_ = null; + } + return colsBuilder_; + } + + // optional string input_format = 2; + private java.lang.Object inputFormat_ = ""; + /** + * optional string input_format = 2; + */ + public boolean hasInputFormat() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string input_format = 2; + */ + public java.lang.String getInputFormat() { + java.lang.Object ref = inputFormat_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + inputFormat_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string input_format = 2; + */ + public com.google.protobuf.ByteString + getInputFormatBytes() { + java.lang.Object ref = inputFormat_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + inputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string input_format = 2; + */ + public Builder setInputFormat( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + inputFormat_ = value; + onChanged(); + return this; + } + /** + * optional string input_format = 2; + */ + public Builder clearInputFormat() { + bitField0_ = (bitField0_ & ~0x00000002); + inputFormat_ = getDefaultInstance().getInputFormat(); + onChanged(); + return this; + } + /** + * optional string input_format = 2; + */ + public Builder setInputFormatBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new 
NullPointerException(); + } + bitField0_ |= 0x00000002; + inputFormat_ = value; + onChanged(); + return this; + } + + // optional string output_format = 3; + private java.lang.Object outputFormat_ = ""; + /** + * optional string output_format = 3; + */ + public boolean hasOutputFormat() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string output_format = 3; + */ + public java.lang.String getOutputFormat() { + java.lang.Object ref = outputFormat_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + outputFormat_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string output_format = 3; + */ + public com.google.protobuf.ByteString + getOutputFormatBytes() { + java.lang.Object ref = outputFormat_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + outputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string output_format = 3; + */ + public Builder setOutputFormat( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + outputFormat_ = value; + onChanged(); + return this; + } + /** + * optional string output_format = 3; + */ + public Builder clearOutputFormat() { + bitField0_ = (bitField0_ & ~0x00000004); + outputFormat_ = getDefaultInstance().getOutputFormat(); + onChanged(); + return this; + } + /** + * optional string output_format = 3; + */ + public Builder setOutputFormatBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + outputFormat_ = value; + onChanged(); + return this; + } + + // optional bool is_compressed = 4; + private boolean isCompressed_ ; + /** + * optional bool is_compressed = 4; + */ + public boolean hasIsCompressed() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bool is_compressed = 4; + */ + public boolean getIsCompressed() { + return isCompressed_; + } + /** + * optional bool is_compressed = 4; + */ + public Builder setIsCompressed(boolean value) { + bitField0_ |= 0x00000008; + isCompressed_ = value; + onChanged(); + return this; + } + /** + * optional bool is_compressed = 4; + */ + public Builder clearIsCompressed() { + bitField0_ = (bitField0_ & ~0x00000008); + isCompressed_ = false; + onChanged(); + return this; + } + + // optional sint32 num_buckets = 5; + private int numBuckets_ ; + /** + * optional sint32 num_buckets = 5; + */ + public boolean hasNumBuckets() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional sint32 num_buckets = 5; + */ + public int getNumBuckets() { + return numBuckets_; + } + /** + * optional sint32 num_buckets = 5; + */ + public Builder setNumBuckets(int value) { + bitField0_ |= 0x00000010; + numBuckets_ = value; + onChanged(); + return this; + } + /** + * optional sint32 num_buckets = 5; + */ + public Builder clearNumBuckets() { + bitField0_ = (bitField0_ & ~0x00000010); + numBuckets_ = 0; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo serdeInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); + 
private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder> serdeInfoBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public boolean hasSerdeInfo() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo getSerdeInfo() { + if (serdeInfoBuilder_ == null) { + return serdeInfo_; + } else { + return serdeInfoBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public Builder setSerdeInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo value) { + if (serdeInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + serdeInfo_ = value; + onChanged(); + } else { + serdeInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public Builder setSerdeInfo( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder builderForValue) { + if (serdeInfoBuilder_ == null) { + serdeInfo_ = builderForValue.build(); + onChanged(); + } else { + serdeInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public Builder mergeSerdeInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo value) { + if (serdeInfoBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020) && + serdeInfo_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance()) { + serdeInfo_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.newBuilder(serdeInfo_).mergeFrom(value).buildPartial(); + } else { + serdeInfo_ = value; + } + onChanged(); + } else { + serdeInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public Builder clearSerdeInfo() { + if (serdeInfoBuilder_ == null) { + serdeInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); + onChanged(); + } else { + serdeInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder getSerdeInfoBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return getSerdeInfoFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder 
getSerdeInfoOrBuilder() { + if (serdeInfoBuilder_ != null) { + return serdeInfoBuilder_.getMessageOrBuilder(); + } else { + return serdeInfo_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder> + getSerdeInfoFieldBuilder() { + if (serdeInfoBuilder_ == null) { + serdeInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder>( + serdeInfo_, + getParentForChildren(), + isClean()); + serdeInfo_ = null; + } + return serdeInfoBuilder_; + } + + // repeated string bucket_cols = 7; + private com.google.protobuf.LazyStringList bucketCols_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureBucketColsIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + bucketCols_ = new com.google.protobuf.LazyStringArrayList(bucketCols_); + bitField0_ |= 0x00000040; + } + } + /** + * repeated string bucket_cols = 7; + */ + public java.util.List + getBucketColsList() { + return java.util.Collections.unmodifiableList(bucketCols_); + } + /** + * repeated string bucket_cols = 7; + */ + public int getBucketColsCount() { + return bucketCols_.size(); + } + /** + * repeated string bucket_cols = 7; + */ + public java.lang.String getBucketCols(int index) { + return bucketCols_.get(index); + } + /** + * repeated string bucket_cols = 7; + */ + public com.google.protobuf.ByteString + getBucketColsBytes(int index) { + return bucketCols_.getByteString(index); + } + /** + * repeated string bucket_cols = 7; + */ + public Builder setBucketCols( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketColsIsMutable(); + bucketCols_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string bucket_cols = 7; + */ + public Builder addBucketCols( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketColsIsMutable(); + bucketCols_.add(value); + onChanged(); + return this; + } + /** + * repeated string bucket_cols = 7; + */ + public Builder addAllBucketCols( + java.lang.Iterable values) { + ensureBucketColsIsMutable(); + super.addAll(values, bucketCols_); + onChanged(); + return this; + } + /** + * repeated string bucket_cols = 7; + */ + public Builder clearBucketCols() { + bucketCols_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + /** + * repeated string bucket_cols = 7; + */ + public Builder addBucketColsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketColsIsMutable(); + bucketCols_.add(value); + onChanged(); + return this; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + private java.util.List sortCols_ = + java.util.Collections.emptyList(); + private void ensureSortColsIsMutable() { + if (!((bitField0_ 
& 0x00000080) == 0x00000080)) { + sortCols_ = new java.util.ArrayList(sortCols_); + bitField0_ |= 0x00000080; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder> sortColsBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public java.util.List getSortColsList() { + if (sortColsBuilder_ == null) { + return java.util.Collections.unmodifiableList(sortCols_); + } else { + return sortColsBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public int getSortColsCount() { + if (sortColsBuilder_ == null) { + return sortCols_.size(); + } else { + return sortColsBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order getSortCols(int index) { + if (sortColsBuilder_ == null) { + return sortCols_.get(index); + } else { + return sortColsBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder setSortCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order value) { + if (sortColsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSortColsIsMutable(); + sortCols_.set(index, value); + onChanged(); + } else { + sortColsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder setSortCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder builderForValue) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.set(index, builderForValue.build()); + onChanged(); + } else { + sortColsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder addSortCols(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order value) { + if (sortColsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSortColsIsMutable(); + sortCols_.add(value); + onChanged(); + } else { + sortColsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder addSortCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order value) { + if (sortColsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSortColsIsMutable(); + sortCols_.add(index, value); + onChanged(); + } else { + sortColsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder addSortCols( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder builderForValue) { + if 
(sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.add(builderForValue.build()); + onChanged(); + } else { + sortColsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder addSortCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder builderForValue) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.add(index, builderForValue.build()); + onChanged(); + } else { + sortColsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder addAllSortCols( + java.lang.Iterable values) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + super.addAll(values, sortCols_); + onChanged(); + } else { + sortColsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder clearSortCols() { + if (sortColsBuilder_ == null) { + sortCols_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + } else { + sortColsBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder removeSortCols(int index) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.remove(index); + onChanged(); + } else { + sortColsBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder getSortColsBuilder( + int index) { + return getSortColsFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder getSortColsOrBuilder( + int index) { + if (sortColsBuilder_ == null) { + return sortCols_.get(index); } else { + return sortColsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public java.util.List + getSortColsOrBuilderList() { + if (sortColsBuilder_ != null) { + return sortColsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(sortCols_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder addSortColsBuilder() { + return getSortColsFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder addSortColsBuilder( + int index) { + return getSortColsFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.getDefaultInstance()); + } + /** + * repeated 
.org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public java.util.List + getSortColsBuilderList() { + return getSortColsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder> + getSortColsFieldBuilder() { + if (sortColsBuilder_ == null) { + sortColsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder>( + sortCols_, + ((bitField0_ & 0x00000080) == 0x00000080), + getParentForChildren(), + isClean()); + sortCols_ = null; + } + return sortColsBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo skewedInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder> skewedInfoBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public boolean hasSkewedInfo() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo getSkewedInfo() { + if (skewedInfoBuilder_ == null) { + return skewedInfo_; + } else { + return skewedInfoBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public Builder setSkewedInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo value) { + if (skewedInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + skewedInfo_ = value; + onChanged(); + } else { + skewedInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public Builder setSkewedInfo( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder builderForValue) { + if (skewedInfoBuilder_ == null) { + skewedInfo_ = builderForValue.build(); + onChanged(); + } else { + skewedInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public Builder mergeSkewedInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo value) { + if (skewedInfoBuilder_ == null) { + if (((bitField0_ & 0x00000100) 
== 0x00000100) && + skewedInfo_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance()) { + skewedInfo_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.newBuilder(skewedInfo_).mergeFrom(value).buildPartial(); + } else { + skewedInfo_ = value; + } + onChanged(); + } else { + skewedInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public Builder clearSkewedInfo() { + if (skewedInfoBuilder_ == null) { + skewedInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); + onChanged(); + } else { + skewedInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder getSkewedInfoBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return getSkewedInfoFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder getSkewedInfoOrBuilder() { + if (skewedInfoBuilder_ != null) { + return skewedInfoBuilder_.getMessageOrBuilder(); + } else { + return skewedInfo_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder> + getSkewedInfoFieldBuilder() { + if (skewedInfoBuilder_ == null) { + skewedInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder>( + skewedInfo_, + getParentForChildren(), + isClean()); + skewedInfo_ = null; + } + return skewedInfoBuilder_; + } + + // optional bool stored_as_sub_directories = 10; + private boolean storedAsSubDirectories_ ; + /** + * optional bool stored_as_sub_directories = 10; + */ + public boolean hasStoredAsSubDirectories() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional bool stored_as_sub_directories = 10; + */ + public boolean getStoredAsSubDirectories() { + return storedAsSubDirectories_; + } + /** + * optional bool stored_as_sub_directories = 10; + */ + public Builder setStoredAsSubDirectories(boolean value) { + bitField0_ |= 0x00000200; + storedAsSubDirectories_ = value; + onChanged(); + return this; + } + /** + * optional bool stored_as_sub_directories = 10; + */ + public Builder clearStoredAsSubDirectories() { + bitField0_ = (bitField0_ & ~0x00000200); + storedAsSubDirectories_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor) + } + + static { 
+ defaultInstance = new StorageDescriptor(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor) + } + + public interface TableOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string owner = 1; + /** + * optional string owner = 1; + */ + boolean hasOwner(); + /** + * optional string owner = 1; + */ + java.lang.String getOwner(); + /** + * optional string owner = 1; + */ + com.google.protobuf.ByteString + getOwnerBytes(); + + // optional int64 create_time = 2; + /** + * optional int64 create_time = 2; + */ + boolean hasCreateTime(); + /** + * optional int64 create_time = 2; + */ + long getCreateTime(); + + // optional int64 last_access_time = 3; + /** + * optional int64 last_access_time = 3; + */ + boolean hasLastAccessTime(); + /** + * optional int64 last_access_time = 3; + */ + long getLastAccessTime(); + + // optional int64 retention = 4; + /** + * optional int64 retention = 4; + */ + boolean hasRetention(); + /** + * optional int64 retention = 4; + */ + long getRetention(); + + // optional string location = 5; + /** + * optional string location = 5; + */ + boolean hasLocation(); + /** + * optional string location = 5; + */ + java.lang.String getLocation(); + /** + * optional string location = 5; + */ + com.google.protobuf.ByteString + getLocationBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+     * storage descriptor parameters
+     * 
+ */ + boolean hasSdParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+     * storage descriptor parameters
+     * 
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+     * storage descriptor parameters
+     * 
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder(); + + // required bytes sd_hash = 7; + /** + * required bytes sd_hash = 7; + */ + boolean hasSdHash(); + /** + * required bytes sd_hash = 7; + */ + com.google.protobuf.ByteString getSdHash(); + + // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + java.util.List + getPartitionKeysList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getPartitionKeys(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + int getPartitionKeysCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + java.util.List + getPartitionKeysOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getPartitionKeysOrBuilder( + int index); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + boolean hasParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); + + // optional string view_original_text = 10; + /** + * optional string view_original_text = 10; + */ + boolean hasViewOriginalText(); + /** + * optional string view_original_text = 10; + */ + java.lang.String getViewOriginalText(); + /** + * optional string view_original_text = 10; + */ + com.google.protobuf.ByteString + getViewOriginalTextBytes(); + + // optional string view_expanded_text = 11; + /** + * optional string view_expanded_text = 11; + */ + boolean hasViewExpandedText(); + /** + * optional string view_expanded_text = 11; + */ + java.lang.String getViewExpandedText(); + /** + * optional string view_expanded_text = 11; + */ + com.google.protobuf.ByteString + getViewExpandedTextBytes(); + + // optional string table_type = 12; + /** + * optional string table_type = 12; + */ + boolean hasTableType(); + /** + * optional string table_type = 12; + */ + java.lang.String getTableType(); + /** + * optional string table_type = 12; + */ + com.google.protobuf.ByteString + getTableTypeBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + boolean hasPrivileges(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder(); + + // optional bool is_temporary = 14; + /** + * optional bool is_temporary = 14; + */ + 
boolean hasIsTemporary(); + /** + * optional bool is_temporary = 14; + */ + boolean getIsTemporary(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Table} + */ + public static final class Table extends + com.google.protobuf.GeneratedMessage + implements TableOrBuilder { + // Use Table.newBuilder() to construct. + private Table(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Table(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Table defaultInstance; + public static Table getDefaultInstance() { + return defaultInstance; + } + + public Table getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Table( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + owner_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + createTime_ = input.readInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + lastAccessTime_ = input.readInt64(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + retention_ = input.readInt64(); + break; + } + case 42: { + bitField0_ |= 0x00000010; + location_ = input.readBytes(); + break; + } + case 50: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + subBuilder = sdParameters_.toBuilder(); + } + sdParameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(sdParameters_); + sdParameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000020; + break; + } + case 58: { + bitField0_ |= 0x00000040; + sdHash_ = input.readBytes(); + break; + } + case 66: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + partitionKeys_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000080; + } + partitionKeys_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.PARSER, extensionRegistry)); + break; + } + case 74: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000080) == 0x00000080)) { + subBuilder = parameters_.toBuilder(); + } + parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(parameters_); + parameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000080; + break; + } + case 82: { + bitField0_ |= 0x00000100; + viewOriginalText_ = input.readBytes(); + break; + } + case 90: { + bitField0_ |= 
0x00000200; + viewExpandedText_ = input.readBytes(); + break; + } + case 98: { + bitField0_ |= 0x00000400; + tableType_ = input.readBytes(); + break; + } + case 106: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder subBuilder = null; + if (((bitField0_ & 0x00000800) == 0x00000800)) { + subBuilder = privileges_.toBuilder(); + } + privileges_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(privileges_); + privileges_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000800; + break; + } + case 112: { + bitField0_ |= 0x00001000; + isTemporary_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + partitionKeys_ = java.util.Collections.unmodifiableList(partitionKeys_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser
<Table>() { + public Table parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Table(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<Table>
getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string owner = 1; + public static final int OWNER_FIELD_NUMBER = 1; + private java.lang.Object owner_; + /** + * optional string owner = 1; + */ + public boolean hasOwner() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string owner = 1; + */ + public java.lang.String getOwner() { + java.lang.Object ref = owner_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + owner_ = s; + } + return s; + } + } + /** + * optional string owner = 1; + */ + public com.google.protobuf.ByteString + getOwnerBytes() { + java.lang.Object ref = owner_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + owner_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 create_time = 2; + public static final int CREATE_TIME_FIELD_NUMBER = 2; + private long createTime_; + /** + * optional int64 create_time = 2; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 create_time = 2; + */ + public long getCreateTime() { + return createTime_; + } + + // optional int64 last_access_time = 3; + public static final int LAST_ACCESS_TIME_FIELD_NUMBER = 3; + private long lastAccessTime_; + /** + * optional int64 last_access_time = 3; + */ + public boolean hasLastAccessTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 last_access_time = 3; + */ + public long getLastAccessTime() { + return lastAccessTime_; + } + + // optional int64 retention = 4; + public static final int RETENTION_FIELD_NUMBER = 4; + private long retention_; + /** + * optional int64 retention = 4; + */ + public boolean hasRetention() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 retention = 4; + */ + public long getRetention() { + return retention_; + } + + // optional string location = 5; + public static final int LOCATION_FIELD_NUMBER = 5; + private java.lang.Object location_; + /** + * optional string location = 5; + */ + public boolean hasLocation() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional string location = 5; + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + location_ = s; + } + return s; + } + } + /** + * optional string location = 5; + */ + public com.google.protobuf.ByteString + getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + public static final int SD_PARAMETERS_FIELD_NUMBER = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_; + /** + * optional 
.org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+     * storage descriptor parameters
+     * 
+ */ + public boolean hasSdParameters() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+     * storage descriptor parameters
+     * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { + return sdParameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+     * storage descriptor parameters
+     * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { + return sdParameters_; + } + + // required bytes sd_hash = 7; + public static final int SD_HASH_FIELD_NUMBER = 7; + private com.google.protobuf.ByteString sdHash_; + /** + * required bytes sd_hash = 7; + */ + public boolean hasSdHash() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * required bytes sd_hash = 7; + */ + public com.google.protobuf.ByteString getSdHash() { + return sdHash_; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + public static final int PARTITION_KEYS_FIELD_NUMBER = 8; + private java.util.List partitionKeys_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public java.util.List getPartitionKeysList() { + return partitionKeys_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public java.util.List + getPartitionKeysOrBuilderList() { + return partitionKeys_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public int getPartitionKeysCount() { + return partitionKeys_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getPartitionKeys(int index) { + return partitionKeys_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getPartitionKeysOrBuilder( + int index) { + return partitionKeys_.get(index); + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + public static final int PARAMETERS_FIELD_NUMBER = 9; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + return parameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + return parameters_; + } + + // optional string view_original_text = 10; + public static final int VIEW_ORIGINAL_TEXT_FIELD_NUMBER = 10; + private java.lang.Object viewOriginalText_; + /** + * optional string view_original_text = 10; + */ + public boolean hasViewOriginalText() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional string view_original_text = 10; + */ + public java.lang.String getViewOriginalText() { + java.lang.Object ref = viewOriginalText_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + viewOriginalText_ = s; + } + return s; + } + } + /** + * optional string view_original_text = 10; + */ + public com.google.protobuf.ByteString + getViewOriginalTextBytes() { + java.lang.Object ref = viewOriginalText_; + if (ref 
instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + viewOriginalText_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string view_expanded_text = 11; + public static final int VIEW_EXPANDED_TEXT_FIELD_NUMBER = 11; + private java.lang.Object viewExpandedText_; + /** + * optional string view_expanded_text = 11; + */ + public boolean hasViewExpandedText() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional string view_expanded_text = 11; + */ + public java.lang.String getViewExpandedText() { + java.lang.Object ref = viewExpandedText_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + viewExpandedText_ = s; + } + return s; + } + } + /** + * optional string view_expanded_text = 11; + */ + public com.google.protobuf.ByteString + getViewExpandedTextBytes() { + java.lang.Object ref = viewExpandedText_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + viewExpandedText_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string table_type = 12; + public static final int TABLE_TYPE_FIELD_NUMBER = 12; + private java.lang.Object tableType_; + /** + * optional string table_type = 12; + */ + public boolean hasTableType() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional string table_type = 12; + */ + public java.lang.String getTableType() { + java.lang.Object ref = tableType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + tableType_ = s; + } + return s; + } + } + /** + * optional string table_type = 12; + */ + public com.google.protobuf.ByteString + getTableTypeBytes() { + java.lang.Object ref = tableType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + public static final int PRIVILEGES_FIELD_NUMBER = 13; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public boolean hasPrivileges() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { + return privileges_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { + return privileges_; + } + + // optional bool is_temporary = 14; + public static final int IS_TEMPORARY_FIELD_NUMBER = 14; + private boolean isTemporary_; 
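For the Table message, sd_hash (field 7) is the only required field; everything else is optional. The isInitialized() and build() logic that follows enforces exactly that one presence check, plus recursive checks on the nested Parameters, FieldSchema and PrincipalPrivilegeSet messages. A small sketch of the resulting behaviour, again assuming the generated classes are available and using placeholder values:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table;

public class RequiredFieldSketch {
  public static void main(String[] args) {
    Table.Builder b = Table.newBuilder()
        .setOwner("hive")                 // placeholder owner
        .setCreateTime(1424842178L);      // placeholder epoch seconds
    // sd_hash is required, so the builder is not initialized yet and
    // build() would throw UninitializedMessageException at this point.
    System.out.println(b.isInitialized());                        // false
    b.setSdHash(ByteString.copyFrom(new byte[] {0x01, 0x02}));    // placeholder hash bytes
    System.out.println(b.isInitialized());                        // true
    Table t = b.build();                                          // succeeds now
    System.out.println(t.getSerializedSize() > 0);                // true
  }
}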
+ /** + * optional bool is_temporary = 14; + */ + public boolean hasIsTemporary() { + return ((bitField0_ & 0x00001000) == 0x00001000); + } + /** + * optional bool is_temporary = 14; + */ + public boolean getIsTemporary() { + return isTemporary_; + } + + private void initFields() { + owner_ = ""; + createTime_ = 0L; + lastAccessTime_ = 0L; + retention_ = 0L; + location_ = ""; + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + sdHash_ = com.google.protobuf.ByteString.EMPTY; + partitionKeys_ = java.util.Collections.emptyList(); + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + viewOriginalText_ = ""; + viewExpandedText_ = ""; + tableType_ = ""; + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + isTemporary_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasSdHash()) { + memoizedIsInitialized = 0; + return false; + } + if (hasSdParameters()) { + if (!getSdParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getPartitionKeysCount(); i++) { + if (!getPartitionKeys(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasParameters()) { + if (!getParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasPrivileges()) { + if (!getPrivileges().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getOwnerBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(2, createTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, lastAccessTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt64(4, retention_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, getLocationBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeMessage(6, sdParameters_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeBytes(7, sdHash_); + } + for (int i = 0; i < partitionKeys_.size(); i++) { + output.writeMessage(8, partitionKeys_.get(i)); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeMessage(9, parameters_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeBytes(10, getViewOriginalTextBytes()); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeBytes(11, getViewExpandedTextBytes()); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeBytes(12, getTableTypeBytes()); + } + if (((bitField0_ & 0x00000800) == 0x00000800)) { + output.writeMessage(13, privileges_); + } + if (((bitField0_ & 0x00001000) == 0x00001000)) { + output.writeBool(14, isTemporary_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, 
getOwnerBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, createTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, lastAccessTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, retention_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getLocationBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, sdParameters_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(7, sdHash_); + } + for (int i = 0; i < partitionKeys_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, partitionKeys_.get(i)); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, parameters_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(10, getViewOriginalTextBytes()); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(11, getViewExpandedTextBytes()); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(12, getTableTypeBytes()); + } + if (((bitField0_ & 0x00000800) == 0x00000800)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(13, privileges_); + } + if (((bitField0_ & 0x00001000) == 0x00001000)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(14, isTemporary_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Table} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.TableOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getSdParametersFieldBuilder(); + getPartitionKeysFieldBuilder(); + getParametersFieldBuilder(); + getPrivilegesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + owner_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + createTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + lastAccessTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + retention_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + location_ 
= ""; + bitField0_ = (bitField0_ & ~0x00000010); + if (sdParametersBuilder_ == null) { + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + sdParametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + sdHash_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000040); + if (partitionKeysBuilder_ == null) { + partitionKeys_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + } else { + partitionKeysBuilder_.clear(); + } + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + viewOriginalText_ = ""; + bitField0_ = (bitField0_ & ~0x00000200); + viewExpandedText_ = ""; + bitField0_ = (bitField0_ & ~0x00000400); + tableType_ = ""; + bitField0_ = (bitField0_ & ~0x00000800); + if (privilegesBuilder_ == null) { + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + } else { + privilegesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00001000); + isTemporary_ = false; + bitField0_ = (bitField0_ & ~0x00002000); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.owner_ = owner_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.createTime_ = createTime_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.lastAccessTime_ = lastAccessTime_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.retention_ = retention_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.location_ = location_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + if (sdParametersBuilder_ == null) { + result.sdParameters_ = sdParameters_; + } else { + result.sdParameters_ = sdParametersBuilder_.build(); + } + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + result.sdHash_ = sdHash_; + if (partitionKeysBuilder_ == null) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { + partitionKeys_ = 
java.util.Collections.unmodifiableList(partitionKeys_); + bitField0_ = (bitField0_ & ~0x00000080); + } + result.partitionKeys_ = partitionKeys_; + } else { + result.partitionKeys_ = partitionKeysBuilder_.build(); + } + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000080; + } + if (parametersBuilder_ == null) { + result.parameters_ = parameters_; + } else { + result.parameters_ = parametersBuilder_.build(); + } + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000100; + } + result.viewOriginalText_ = viewOriginalText_; + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000200; + } + result.viewExpandedText_ = viewExpandedText_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { + to_bitField0_ |= 0x00000400; + } + result.tableType_ = tableType_; + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { + to_bitField0_ |= 0x00000800; + } + if (privilegesBuilder_ == null) { + result.privileges_ = privileges_; + } else { + result.privileges_ = privilegesBuilder_.build(); + } + if (((from_bitField0_ & 0x00002000) == 0x00002000)) { + to_bitField0_ |= 0x00001000; + } + result.isTemporary_ = isTemporary_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.getDefaultInstance()) return this; + if (other.hasOwner()) { + bitField0_ |= 0x00000001; + owner_ = other.owner_; + onChanged(); + } + if (other.hasCreateTime()) { + setCreateTime(other.getCreateTime()); + } + if (other.hasLastAccessTime()) { + setLastAccessTime(other.getLastAccessTime()); + } + if (other.hasRetention()) { + setRetention(other.getRetention()); + } + if (other.hasLocation()) { + bitField0_ |= 0x00000010; + location_ = other.location_; + onChanged(); + } + if (other.hasSdParameters()) { + mergeSdParameters(other.getSdParameters()); + } + if (other.hasSdHash()) { + setSdHash(other.getSdHash()); + } + if (partitionKeysBuilder_ == null) { + if (!other.partitionKeys_.isEmpty()) { + if (partitionKeys_.isEmpty()) { + partitionKeys_ = other.partitionKeys_; + bitField0_ = (bitField0_ & ~0x00000080); + } else { + ensurePartitionKeysIsMutable(); + partitionKeys_.addAll(other.partitionKeys_); + } + onChanged(); + } + } else { + if (!other.partitionKeys_.isEmpty()) { + if (partitionKeysBuilder_.isEmpty()) { + partitionKeysBuilder_.dispose(); + partitionKeysBuilder_ = null; + partitionKeys_ = other.partitionKeys_; + bitField0_ = (bitField0_ & ~0x00000080); + partitionKeysBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getPartitionKeysFieldBuilder() : null; + } else { + partitionKeysBuilder_.addAllMessages(other.partitionKeys_); + } + } + } + if (other.hasParameters()) { + mergeParameters(other.getParameters()); + } + if (other.hasViewOriginalText()) { + bitField0_ |= 0x00000200; + viewOriginalText_ = other.viewOriginalText_; + onChanged(); + } + if (other.hasViewExpandedText()) { + bitField0_ |= 0x00000400; + viewExpandedText_ = other.viewExpandedText_; + onChanged(); + } + if (other.hasTableType()) { + bitField0_ |= 0x00000800; + tableType_ = other.tableType_; + onChanged(); + } + if (other.hasPrivileges()) { + mergePrivileges(other.getPrivileges()); + } + if (other.hasIsTemporary()) { + setIsTemporary(other.getIsTemporary()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasSdHash()) { + + return false; + } + if (hasSdParameters()) { + if (!getSdParameters().isInitialized()) { + + return false; + } + } + for (int i = 0; i < getPartitionKeysCount(); i++) { + if (!getPartitionKeys(i).isInitialized()) { + + return false; + } + } + if (hasParameters()) { + if (!getParameters().isInitialized()) { + + return false; + } + } + if (hasPrivileges()) { + if (!getPrivileges().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string owner = 1; + private java.lang.Object owner_ = ""; + /** + * optional string owner = 1; + */ + public boolean hasOwner() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string owner = 1; + */ + public java.lang.String getOwner() { + java.lang.Object ref = owner_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + owner_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string owner = 1; + */ + public com.google.protobuf.ByteString + getOwnerBytes() { + java.lang.Object ref = owner_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + owner_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string owner = 1; + */ + public Builder setOwner( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + owner_ = value; + onChanged(); + return this; + } + /** + * optional string owner = 1; + */ + public Builder clearOwner() { + bitField0_ = (bitField0_ & ~0x00000001); + owner_ = getDefaultInstance().getOwner(); + onChanged(); + return this; + } + /** + * optional string owner = 1; + */ + public Builder setOwnerBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + owner_ = value; + onChanged(); + return this; + } + + 
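(Aside, not part of the patch: a minimal usage sketch of the generated Table builder shown in this hunk. It only exercises methods visible above — setOwner, setCreateTime, setTableType, setSdHash, build — plus the standard protoc-generated newBuilder() and toByteArray() entry points, which are assumed here since they appear outside this hunk; the field values are hypothetical.)

    // Hypothetical caller code, assuming standard protoc-generated entry points.
    org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.Builder b =
        org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.newBuilder()
            .setOwner("hive")
            .setCreateTime(System.currentTimeMillis() / 1000)
            .setTableType("MANAGED_TABLE");
    // sd_hash is the only required field; per isInitialized() above, build()
    // would throw an uninitialized-message exception if it were left unset.
    b.setSdHash(com.google.protobuf.ByteString.copyFrom(new byte[] {0x01, 0x02}));
    org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table t = b.build();
    byte[] serialized = t.toByteArray();  // standard generated serialization
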
// optional int64 create_time = 2; + private long createTime_ ; + /** + * optional int64 create_time = 2; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 create_time = 2; + */ + public long getCreateTime() { + return createTime_; + } + /** + * optional int64 create_time = 2; + */ + public Builder setCreateTime(long value) { + bitField0_ |= 0x00000002; + createTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 create_time = 2; + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000002); + createTime_ = 0L; + onChanged(); + return this; + } + + // optional int64 last_access_time = 3; + private long lastAccessTime_ ; + /** + * optional int64 last_access_time = 3; + */ + public boolean hasLastAccessTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 last_access_time = 3; + */ + public long getLastAccessTime() { + return lastAccessTime_; + } + /** + * optional int64 last_access_time = 3; + */ + public Builder setLastAccessTime(long value) { + bitField0_ |= 0x00000004; + lastAccessTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 last_access_time = 3; + */ + public Builder clearLastAccessTime() { + bitField0_ = (bitField0_ & ~0x00000004); + lastAccessTime_ = 0L; + onChanged(); + return this; + } + + // optional int64 retention = 4; + private long retention_ ; + /** + * optional int64 retention = 4; + */ + public boolean hasRetention() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 retention = 4; + */ + public long getRetention() { + return retention_; + } + /** + * optional int64 retention = 4; + */ + public Builder setRetention(long value) { + bitField0_ |= 0x00000008; + retention_ = value; + onChanged(); + return this; + } + /** + * optional int64 retention = 4; + */ + public Builder clearRetention() { + bitField0_ = (bitField0_ & ~0x00000008); + retention_ = 0L; + onChanged(); + return this; + } + + // optional string location = 5; + private java.lang.Object location_ = ""; + /** + * optional string location = 5; + */ + public boolean hasLocation() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional string location = 5; + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string location = 5; + */ + public com.google.protobuf.ByteString + getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string location = 5; + */ + public Builder setLocation( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + location_ = value; + onChanged(); + return this; + } + /** + * optional string location = 5; + */ + public Builder clearLocation() { + bitField0_ = (bitField0_ & ~0x00000010); + location_ = getDefaultInstance().getLocation(); + onChanged(); + return this; + } + /** + * optional string location = 5; + */ + public Builder setLocationBytes( + com.google.protobuf.ByteString value) { + if 
(value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + location_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> sdParametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public boolean hasSdParameters() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { + if (sdParametersBuilder_ == null) { + return sdParameters_; + } else { + return sdParametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder setSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (sdParametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sdParameters_ = value; + onChanged(); + } else { + sdParametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder setSdParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (sdParametersBuilder_ == null) { + sdParameters_ = builderForValue.build(); + onChanged(); + } else { + sdParametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder mergeSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (sdParametersBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020) && + sdParameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + sdParameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(sdParameters_).mergeFrom(value).buildPartial(); + } else { + sdParameters_ = value; + } + onChanged(); + } else { + sdParametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder clearSdParameters() { + if (sdParametersBuilder_ == null) { + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + sdParametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getSdParametersBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return getSdParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { + if (sdParametersBuilder_ != null) { + return sdParametersBuilder_.getMessageOrBuilder(); + } else { + return sdParameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getSdParametersFieldBuilder() { + if (sdParametersBuilder_ == null) { + sdParametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + sdParameters_, + getParentForChildren(), + isClean()); + sdParameters_ = null; + } + return sdParametersBuilder_; + } + + // required bytes sd_hash = 7; + private com.google.protobuf.ByteString sdHash_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes sd_hash = 7; + */ + public boolean hasSdHash() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * required bytes sd_hash = 7; + */ + public com.google.protobuf.ByteString getSdHash() { + return sdHash_; + } + /** + * required bytes sd_hash = 7; + */ + public Builder setSdHash(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000040; + sdHash_ = value; + onChanged(); + return this; + } + /** + * required bytes sd_hash = 7; + */ + public Builder clearSdHash() { + bitField0_ = (bitField0_ & ~0x00000040); + sdHash_ = getDefaultInstance().getSdHash(); + onChanged(); + return this; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + private java.util.List partitionKeys_ = + java.util.Collections.emptyList(); + private void ensurePartitionKeysIsMutable() { + if (!((bitField0_ & 0x00000080) == 0x00000080)) { + partitionKeys_ = new java.util.ArrayList(partitionKeys_); + bitField0_ |= 0x00000080; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder> partitionKeysBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public java.util.List getPartitionKeysList() { + if (partitionKeysBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitionKeys_); + } else { + return partitionKeysBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public int getPartitionKeysCount() { + if (partitionKeysBuilder_ == null) { + return partitionKeys_.size(); + } else { + return partitionKeysBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getPartitionKeys(int index) { + if (partitionKeysBuilder_ == null) { + return partitionKeys_.get(index); + } else { + return partitionKeysBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder setPartitionKeys( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { + if (partitionKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + 
ensurePartitionKeysIsMutable(); + partitionKeys_.set(index, value); + onChanged(); + } else { + partitionKeysBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder setPartitionKeys( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionKeysBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder addPartitionKeys(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { + if (partitionKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionKeysIsMutable(); + partitionKeys_.add(value); + onChanged(); + } else { + partitionKeysBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder addPartitionKeys( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { + if (partitionKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionKeysIsMutable(); + partitionKeys_.add(index, value); + onChanged(); + } else { + partitionKeysBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder addPartitionKeys( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.add(builderForValue.build()); + onChanged(); + } else { + partitionKeysBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder addPartitionKeys( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionKeysBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder addAllPartitionKeys( + java.lang.Iterable values) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + super.addAll(values, partitionKeys_); + onChanged(); + } else { + partitionKeysBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder clearPartitionKeys() { + if (partitionKeysBuilder_ == null) { + partitionKeys_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + } else { + partitionKeysBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder removePartitionKeys(int index) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.remove(index); + 
onChanged(); + } else { + partitionKeysBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder getPartitionKeysBuilder( + int index) { + return getPartitionKeysFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getPartitionKeysOrBuilder( + int index) { + if (partitionKeysBuilder_ == null) { + return partitionKeys_.get(index); } else { + return partitionKeysBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public java.util.List + getPartitionKeysOrBuilderList() { + if (partitionKeysBuilder_ != null) { + return partitionKeysBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitionKeys_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder addPartitionKeysBuilder() { + return getPartitionKeysFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder addPartitionKeysBuilder( + int index) { + return getPartitionKeysFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public java.util.List + getPartitionKeysBuilderList() { + return getPartitionKeysFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder> + getPartitionKeysFieldBuilder() { + if (partitionKeysBuilder_ == null) { + partitionKeysBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder>( + partitionKeys_, + ((bitField0_ & 0x00000080) == 0x00000080), + getParentForChildren(), + isClean()); + partitionKeys_ = null; + } + return partitionKeysBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + 
public boolean hasParameters() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + if (parametersBuilder_ == null) { + return parameters_; + } else { + return parametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + parameters_ = value; + onChanged(); + } else { + parametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public Builder setParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (parametersBuilder_ == null) { + parameters_ = builderForValue.build(); + onChanged(); + } else { + parametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (((bitField0_ & 0x00000100) == 0x00000100) && + parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + parameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); + } else { + parameters_ = value; + } + onChanged(); + } else { + parametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public Builder clearParameters() { + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return getParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + if (parametersBuilder_ != null) { + return parametersBuilder_.getMessageOrBuilder(); + } else { + return parameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getParametersFieldBuilder() { + if (parametersBuilder_ == null) { + parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + parameters_, + getParentForChildren(), + isClean()); + parameters_ = null; + } + return parametersBuilder_; + } + + // optional string view_original_text = 10; + private java.lang.Object viewOriginalText_ = ""; + /** + * optional string view_original_text = 10; + */ + public boolean hasViewOriginalText() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional string view_original_text = 10; + */ + public java.lang.String getViewOriginalText() { + java.lang.Object ref = viewOriginalText_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + viewOriginalText_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string view_original_text = 10; + */ + public com.google.protobuf.ByteString + getViewOriginalTextBytes() { + java.lang.Object ref = viewOriginalText_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + viewOriginalText_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string view_original_text = 10; + */ + public Builder setViewOriginalText( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000200; + viewOriginalText_ = value; + onChanged(); + return this; + } + /** + * optional string view_original_text = 10; + */ + public Builder clearViewOriginalText() { + bitField0_ = (bitField0_ & ~0x00000200); + viewOriginalText_ = getDefaultInstance().getViewOriginalText(); + onChanged(); + return this; + } + /** + * optional string view_original_text = 10; + */ + public Builder setViewOriginalTextBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000200; + viewOriginalText_ = value; + onChanged(); + return this; + } + + // optional string view_expanded_text = 11; + private java.lang.Object viewExpandedText_ = ""; + /** + * optional string view_expanded_text = 11; + */ + public boolean hasViewExpandedText() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional string view_expanded_text = 11; + */ + public java.lang.String getViewExpandedText() { + java.lang.Object ref = viewExpandedText_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + viewExpandedText_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string view_expanded_text = 11; + */ + public com.google.protobuf.ByteString + getViewExpandedTextBytes() { + java.lang.Object ref = viewExpandedText_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + viewExpandedText_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string view_expanded_text = 11; + */ + public Builder setViewExpandedText( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + viewExpandedText_ = value; + onChanged(); + return this; + } + /** + * optional string view_expanded_text = 11; + */ 
+ public Builder clearViewExpandedText() { + bitField0_ = (bitField0_ & ~0x00000400); + viewExpandedText_ = getDefaultInstance().getViewExpandedText(); + onChanged(); + return this; + } + /** + * optional string view_expanded_text = 11; + */ + public Builder setViewExpandedTextBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + viewExpandedText_ = value; + onChanged(); + return this; + } + + // optional string table_type = 12; + private java.lang.Object tableType_ = ""; + /** + * optional string table_type = 12; + */ + public boolean hasTableType() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * optional string table_type = 12; + */ + public java.lang.String getTableType() { + java.lang.Object ref = tableType_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + tableType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string table_type = 12; + */ + public com.google.protobuf.ByteString + getTableTypeBytes() { + java.lang.Object ref = tableType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string table_type = 12; + */ + public Builder setTableType( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000800; + tableType_ = value; + onChanged(); + return this; + } + /** + * optional string table_type = 12; + */ + public Builder clearTableType() { + bitField0_ = (bitField0_ & ~0x00000800); + tableType_ = getDefaultInstance().getTableType(); + onChanged(); + return this; + } + /** + * optional string table_type = 12; + */ + public Builder setTableTypeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000800; + tableType_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> privilegesBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public boolean hasPrivileges() { + return ((bitField0_ & 0x00001000) == 0x00001000); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { + if (privilegesBuilder_ == null) { + return privileges_; + } else { + return privilegesBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public Builder setPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { + 
if (privilegesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + privileges_ = value; + onChanged(); + } else { + privilegesBuilder_.setMessage(value); + } + bitField0_ |= 0x00001000; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public Builder setPrivileges( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder builderForValue) { + if (privilegesBuilder_ == null) { + privileges_ = builderForValue.build(); + onChanged(); + } else { + privilegesBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00001000; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public Builder mergePrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { + if (privilegesBuilder_ == null) { + if (((bitField0_ & 0x00001000) == 0x00001000) && + privileges_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) { + privileges_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(privileges_).mergeFrom(value).buildPartial(); + } else { + privileges_ = value; + } + onChanged(); + } else { + privilegesBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00001000; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public Builder clearPrivileges() { + if (privilegesBuilder_ == null) { + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + onChanged(); + } else { + privilegesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00001000); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder getPrivilegesBuilder() { + bitField0_ |= 0x00001000; + onChanged(); + return getPrivilegesFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { + if (privilegesBuilder_ != null) { + return privilegesBuilder_.getMessageOrBuilder(); + } else { + return privileges_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> + getPrivilegesFieldBuilder() { + if (privilegesBuilder_ == null) { + privilegesBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder>( + privileges_, + getParentForChildren(), + isClean()); + privileges_ = null; + } + return privilegesBuilder_; + } + + // optional bool is_temporary = 14; + private boolean isTemporary_ ; + /** + * 
optional bool is_temporary = 14; + */ + public boolean hasIsTemporary() { + return ((bitField0_ & 0x00002000) == 0x00002000); + } + /** + * optional bool is_temporary = 14; + */ + public boolean getIsTemporary() { + return isTemporary_; + } + /** + * optional bool is_temporary = 14; + */ + public Builder setIsTemporary(boolean value) { + bitField0_ |= 0x00002000; + isTemporary_ = value; + onChanged(); + return this; + } + /** + * optional bool is_temporary = 14; + */ + public Builder clearIsTemporary() { + bitField0_ = (bitField0_ & ~0x00002000); + isTemporary_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Table) + } + + static { + defaultInstance = new Table(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Table) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + 
internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor; + private static + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\033hbase_metastore_proto.proto\022&org.apach" + + "e.hadoop.hive.metastore.hbase\"\310\010\n\013Column" + + "Stats\022\025\n\rlast_analyzed\030\001 \001(\003\022\023\n\013column_t" + + "ype\030\002 \002(\t\022\021\n\tnum_nulls\030\003 \001(\003\022\033\n\023num_dist" + + "inct_values\030\004 \001(\003\022T\n\nbool_stats\030\005 \001(\0132@." 
+ + "org.apache.hadoop.hive.metastore.hbase.C" + + "olumnStats.BooleanStats\022Q\n\nlong_stats\030\006 " + + "\001(\0132=.org.apache.hadoop.hive.metastore.h" + + "base.ColumnStats.LongStats\022U\n\014double_sta" + + "ts\030\007 \001(\0132?.org.apache.hadoop.hive.metast", + "ore.hbase.ColumnStats.DoubleStats\022U\n\014str" + + "ing_stats\030\010 \001(\0132?.org.apache.hadoop.hive" + + ".metastore.hbase.ColumnStats.StringStats" + + "\022U\n\014binary_stats\030\t \001(\0132?.org.apache.hado" + + "op.hive.metastore.hbase.ColumnStats.Stri" + + "ngStats\022W\n\rdecimal_stats\030\n \001(\0132@.org.apa" + + "che.hadoop.hive.metastore.hbase.ColumnSt" + + "ats.DecimalStats\0325\n\014BooleanStats\022\021\n\tnum_" + + "trues\030\001 \001(\003\022\022\n\nnum_falses\030\002 \001(\003\0322\n\tLongS" + + "tats\022\021\n\tlow_value\030\001 \001(\022\022\022\n\nhigh_value\030\002 ", + "\001(\022\0324\n\013DoubleStats\022\021\n\tlow_value\030\001 \001(\001\022\022\n" + + "\nhigh_value\030\002 \001(\001\032=\n\013StringStats\022\026\n\016max_" + + "col_length\030\001 \001(\003\022\026\n\016avg_col_length\030\002 \001(\001" + + "\032\365\001\n\014DecimalStats\022[\n\tlow_value\030\001 \001(\0132H.o" + + "rg.apache.hadoop.hive.metastore.hbase.Co" + + "lumnStats.DecimalStats.Decimal\022\\\n\nhigh_v" + + "alue\030\002 \001(\0132H.org.apache.hadoop.hive.meta" + + "store.hbase.ColumnStats.DecimalStats.Dec" + + "imal\032*\n\007Decimal\022\020\n\010unscaled\030\001 \002(\014\022\r\n\005sca" + + "le\030\002 \002(\005\"\246\002\n\010Database\022\023\n\013description\030\001 \001", + "(\t\022\013\n\003uri\030\002 \001(\t\022F\n\nparameters\030\003 \001(\01322.or" + + "g.apache.hadoop.hive.metastore.hbase.Par" + + "ameters\022Q\n\nprivileges\030\004 \001(\0132=.org.apache" + + ".hadoop.hive.metastore.hbase.PrincipalPr" + + "ivilegeSet\022\022\n\nowner_name\030\005 \001(\t\022I\n\nowner_" + + "type\030\006 \001(\01625.org.apache.hadoop.hive.meta" + + "store.hbase.PrincipalType\":\n\013FieldSchema" + + "\022\014\n\004name\030\001 \002(\t\022\014\n\004type\030\002 \002(\t\022\017\n\007comment\030" + + "\003 \001(\t\",\n\016ParameterEntry\022\013\n\003key\030\001 \002(\t\022\r\n\005" + + "value\030\002 \002(\t\"W\n\nParameters\022I\n\tparameter\030\001", + " \003(\01326.org.apache.hadoop.hive.metastore." 
+ + "hbase.ParameterEntry\"\360\001\n\tPartition\022\023\n\013cr" + + "eate_time\030\001 \001(\003\022\030\n\020last_access_time\030\002 \001(" + + "\003\022\020\n\010location\030\003 \001(\t\022I\n\rsd_parameters\030\004 \001" + + "(\01322.org.apache.hadoop.hive.metastore.hb" + + "ase.Parameters\022\017\n\007sd_hash\030\005 \002(\014\022F\n\nparam" + + "eters\030\006 \001(\01322.org.apache.hadoop.hive.met" + + "astore.hbase.Parameters\"\204\001\n\032PrincipalPri" + + "vilegeSetEntry\022\026\n\016principal_name\030\001 \002(\t\022N" + + "\n\nprivileges\030\002 \003(\0132:.org.apache.hadoop.h", + "ive.metastore.hbase.PrivilegeGrantInfo\"\275" + + "\001\n\025PrincipalPrivilegeSet\022Q\n\005users\030\001 \003(\0132" + + "B.org.apache.hadoop.hive.metastore.hbase" + + ".PrincipalPrivilegeSetEntry\022Q\n\005roles\030\002 \003" + + "(\0132B.org.apache.hadoop.hive.metastore.hb" + + "ase.PrincipalPrivilegeSetEntry\"\260\001\n\022Privi" + + "legeGrantInfo\022\021\n\tprivilege\030\001 \001(\t\022\023\n\013crea" + + "te_time\030\002 \001(\003\022\017\n\007grantor\030\003 \001(\t\022K\n\014granto" + + "r_type\030\004 \001(\01625.org.apache.hadoop.hive.me" + + "tastore.hbase.PrincipalType\022\024\n\014grant_opt", + "ion\030\005 \001(\010\"\374\001\n\rRoleGrantInfo\022\026\n\016principal" + + "_name\030\001 \002(\t\022M\n\016principal_type\030\002 \002(\01625.or" + + "g.apache.hadoop.hive.metastore.hbase.Pri" + + "ncipalType\022\020\n\010add_time\030\003 \001(\003\022\017\n\007grantor\030" + + "\004 \001(\t\022K\n\014grantor_type\030\005 \001(\01625.org.apache" + + ".hadoop.hive.metastore.hbase.PrincipalTy" + + "pe\022\024\n\014grant_option\030\006 \001(\010\"^\n\021RoleGrantInf" + + "oList\022I\n\ngrant_info\030\001 \003(\01325.org.apache.h" + + "adoop.hive.metastore.hbase.RoleGrantInfo" + + "\"\030\n\010RoleList\022\014\n\004role\030\001 \003(\t\"/\n\004Role\022\023\n\013cr", + "eate_time\030\001 \001(\003\022\022\n\nowner_name\030\002 \001(\t\"\254\010\n\021" + + "StorageDescriptor\022A\n\004cols\030\001 \003(\01323.org.ap" + + "ache.hadoop.hive.metastore.hbase.FieldSc" + + "hema\022\024\n\014input_format\030\002 \001(\t\022\025\n\routput_for" + + "mat\030\003 \001(\t\022\025\n\ris_compressed\030\004 \001(\010\022\023\n\013num_" + + "buckets\030\005 \001(\021\022W\n\nserde_info\030\006 \001(\0132C.org." 
+ + "apache.hadoop.hive.metastore.hbase.Stora" + + "geDescriptor.SerDeInfo\022\023\n\013bucket_cols\030\007 " + + "\003(\t\022R\n\tsort_cols\030\010 \003(\0132?.org.apache.hado" + + "op.hive.metastore.hbase.StorageDescripto", + "r.Order\022Y\n\013skewed_info\030\t \001(\0132D.org.apach" + + "e.hadoop.hive.metastore.hbase.StorageDes" + + "criptor.SkewedInfo\022!\n\031stored_as_sub_dire" + + "ctories\030\n \001(\010\032.\n\005Order\022\023\n\013column_name\030\001 " + + "\002(\t\022\020\n\005order\030\002 \001(\021:\0011\032|\n\tSerDeInfo\022\014\n\004na" + + "me\030\001 \001(\t\022\031\n\021serialization_lib\030\002 \001(\t\022F\n\np" + + "arameters\030\003 \001(\01322.org.apache.hadoop.hive" + + ".metastore.hbase.Parameters\032\214\003\n\nSkewedIn" + + "fo\022\030\n\020skewed_col_names\030\001 \003(\t\022r\n\021skewed_c" + + "ol_values\030\002 \003(\0132W.org.apache.hadoop.hive", + ".metastore.hbase.StorageDescriptor.Skewe" + + "dInfo.SkewedColValueList\022\206\001\n\036skewed_col_" + + "value_location_maps\030\003 \003(\0132^.org.apache.h" + + "adoop.hive.metastore.hbase.StorageDescri" + + "ptor.SkewedInfo.SkewedColValueLocationMa" + + "p\032.\n\022SkewedColValueList\022\030\n\020skewed_col_va" + + "lue\030\001 \003(\t\0327\n\031SkewedColValueLocationMap\022\013" + + "\n\003key\030\001 \003(\t\022\r\n\005value\030\002 \002(\t\"\220\004\n\005Table\022\r\n\005" + + "owner\030\001 \001(\t\022\023\n\013create_time\030\002 \001(\003\022\030\n\020last" + + "_access_time\030\003 \001(\003\022\021\n\tretention\030\004 \001(\003\022\020\n", + "\010location\030\005 \001(\t\022I\n\rsd_parameters\030\006 \001(\01322" + + ".org.apache.hadoop.hive.metastore.hbase." + + "Parameters\022\017\n\007sd_hash\030\007 \002(\014\022K\n\016partition" + + "_keys\030\010 \003(\01323.org.apache.hadoop.hive.met" + + "astore.hbase.FieldSchema\022F\n\nparameters\030\t" + + " \001(\01322.org.apache.hadoop.hive.metastore." 
+ + "hbase.Parameters\022\032\n\022view_original_text\030\n" + + " \001(\t\022\032\n\022view_expanded_text\030\013 \001(\t\022\022\n\ntabl" + + "e_type\030\014 \001(\t\022Q\n\nprivileges\030\r \001(\0132=.org.a" + + "pache.hadoop.hive.metastore.hbase.Princi", + "palPrivilegeSet\022\024\n\014is_temporary\030\016 \001(\010*#\n" + + "\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004ROLE\020\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor, + new java.lang.String[] { "LastAnalyzed", "ColumnType", "NumNulls", "NumDistinctValues", "BoolStats", "LongStats", "DoubleStats", "StringStats", "BinaryStats", "DecimalStats", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor, + new java.lang.String[] { "NumTrues", "NumFalses", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(1); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor, + new java.lang.String[] { "LowValue", "HighValue", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(2); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor, + new java.lang.String[] { "LowValue", "HighValue", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(3); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor, + new java.lang.String[] { "MaxColLength", "AvgColLength", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(4); + 
internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor, + new java.lang.String[] { "LowValue", "HighValue", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor, + new java.lang.String[] { "Unscaled", "Scale", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor, + new java.lang.String[] { "Description", "Uri", "Parameters", "Privileges", "OwnerName", "OwnerType", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor, + new java.lang.String[] { "Name", "Type", "Comment", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor, + new java.lang.String[] { "Parameter", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor, + new java.lang.String[] { "CreateTime", "LastAccessTime", "Location", "SdParameters", "SdHash", "Parameters", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor, + new java.lang.String[] { "PrincipalName", "Privileges", }); + 
internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor, + new java.lang.String[] { "Users", "Roles", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor, + new java.lang.String[] { "Privilege", "CreateTime", "Grantor", "GrantorType", "GrantOption", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor, + new java.lang.String[] { "PrincipalName", "PrincipalType", "AddTime", "Grantor", "GrantorType", "GrantOption", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor, + new java.lang.String[] { "GrantInfo", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor, + new java.lang.String[] { "Role", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor, + new java.lang.String[] { "CreateTime", "OwnerName", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor, + new java.lang.String[] { "Cols", "InputFormat", "OutputFormat", "IsCompressed", "NumBuckets", "SerdeInfo", "BucketCols", "SortCols", "SkewedInfo", "StoredAsSubDirectories", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor, + new java.lang.String[] { "ColumnName", "Order", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor.getNestedTypes().get(1); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor, + new java.lang.String[] { "Name", "SerializationLib", "Parameters", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor.getNestedTypes().get(2); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor, + new java.lang.String[] { "SkewedColNames", "SkewedColValues", "SkewedColValueLocationMaps", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor, + new java.lang.String[] { "SkewedColValue", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor.getNestedTypes().get(1); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor, + new java.lang.String[] { "Owner", "CreateTime", "LastAccessTime", "Retention", "Location", "SdParameters", "SdHash", "PartitionKeys", "Parameters", "ViewOriginalText", "ViewExpandedText", "TableType", "Privileges", "IsTemporary", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/DatabaseWritable.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/DatabaseWritable.java deleted file mode 100644 index 67268e0..0000000 --- 
metastore/src/java/org/apache/hadoop/hive/metastore/hbase/DatabaseWritable.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -/** - * Wrapper for {@link org.apache.hadoop.hive.metastore.api.Database} that makes it writable - */ -class DatabaseWritable implements Writable { - final Database db; - - DatabaseWritable() { - this.db = new Database(); - } - - DatabaseWritable(Database db) { - this.db = db; - } - - @Override - public void write(DataOutput out) throws IOException { - HBaseUtils.writeStr(out, db.getName()); - HBaseUtils.writeStr(out, db.getDescription()); - HBaseUtils.writeStr(out, db.getLocationUri()); - HBaseUtils.writeStrStrMap(out, db.getParameters()); - HBaseUtils.writePrivileges(out, db.getPrivileges()); - HBaseUtils.writeStr(out, db.getOwnerName()); - HBaseUtils.writeEnum(out, db.getOwnerType()); - } - - @Override - public void readFields(DataInput in) throws IOException { - db.setName(HBaseUtils.readStr(in)); - db.setDescription(HBaseUtils.readStr(in)); - db.setLocationUri(HBaseUtils.readStr(in)); - db.setParameters(HBaseUtils.readStrStrMap(in)); - db.setPrivileges(HBaseUtils.readPrivileges(in)); - db.setOwnerName(HBaseUtils.readStr(in)); - db.setOwnerType(HBaseUtils.readPrincipalType(in)); - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/GrantInfoList.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/GrantInfoList.java deleted file mode 100644 index cde1b78..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/GrantInfoList.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.hadoop.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * A class to serialize a list of grant infos. There is not a corresponding thrift object. - */ -public class GrantInfoList implements Writable{ - List grantInfos; - - GrantInfoList() { - grantInfos = new ArrayList(); - } - - GrantInfoList(List infos) { - grantInfos = infos; - } - - - @Override - public void write(DataOutput out) throws IOException { - if (grantInfos == null) { - out.writeInt(0); - } else { - out.writeInt(grantInfos.size()); - for (GrantInfoWritable info : grantInfos) { - info.write(out); - } - } - } - - @Override - public void readFields(DataInput in) throws IOException { - int size = in.readInt(); - if (size == 0) { - grantInfos = new ArrayList(); - } else { - grantInfos = new ArrayList(size); - for (int i = 0; i < size; i++) { - GrantInfoWritable info = new GrantInfoWritable(); - info.readFields(in); - grantInfos.add(info); - } - } - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/GrantInfoWritable.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/GrantInfoWritable.java deleted file mode 100644 index 2880410..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/GrantInfoWritable.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * A class to serialize grant information. There is not a corresponding thrift object. 
- */ -class GrantInfoWritable implements Writable { - String principalName; - PrincipalType principalType; - int addTime; - String grantor; - PrincipalType grantorType; - boolean grantOption; - - GrantInfoWritable() { - } - - /** - * - * @param name name of the user or role - * @param type whether this is a user or a role - * @param addTime time user was added to role - * @param grantor user or role who granted this principal into the role - * @param grantorType whether the grantor was a user or a role - * @param withGrantOption whether this principal has the grant option - */ - GrantInfoWritable(String name, PrincipalType type, int addTime, String grantor, - PrincipalType grantorType, boolean withGrantOption) { - principalName = name; - principalType = type; - this.addTime = addTime; - this.grantor = grantor; - this.grantorType = grantorType; - grantOption = withGrantOption; - } - - @Override - public void write(DataOutput out) throws IOException { - HBaseUtils.writeStr(out, principalName); - out.writeInt(principalType.getValue()); - out.writeInt(addTime); - HBaseUtils.writeStr(out, grantor); - out.writeInt(grantorType.getValue()); - out.writeBoolean(grantOption); - } - - @Override - public void readFields(DataInput in) throws IOException { - principalName = HBaseUtils.readStr(in); - principalType = PrincipalType.findByValue(in.readInt()); - addTime = in.readInt(); - grantor = HBaseUtils.readStr(in); - grantorType = PrincipalType.findByValue(in.readInt()); - grantOption = in.readBoolean(); - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseConnection.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseConnection.java new file mode 100644 index 0000000..68acc1d --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseConnection.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.hbase.client.HTableInterface; + +import java.io.IOException; +import java.util.List; + +/** + * A connection to HBase. Separated out as an interface so we can slide different transaction + * managers between our code and HBase. + */ +public interface HBaseConnection extends Configurable { + + /** + * Connects to HBase. This must be called after {@link #setConf} has been called. + * @throws IOException + */ + void connect() throws IOException; + + /** + * Close the connection. No further operations are possible after this is done. + * @throws IOException + */ + void close() throws IOException; + + /** + * Begin a transaction. 
+ * @throws IOException + */ + void beginTransaction() throws IOException; + + /** + * Commit a transaction + * @throws IOException indicates the commit has failed + */ + void commitTransaction() throws IOException; + + /** + * Rollback a transaction + * @throws IOException + */ + void rollbackTransaction() throws IOException; + + /** + * Create a new table + * @param tableName name of the table + * @param columnFamilies name of the column families in the table + * @throws IOException + */ + void createHBaseTable(String tableName, List columnFamilies) throws IOException; + + /** + * Fetch an existing HBase table. + * @param tableName name of the table + * @return table handle + * @throws IOException + */ + HTableInterface getHBaseTable(String tableName) throws IOException; + + /** + * Fetch an existing HBase table and force a connection to it. This should be used only in + * cases where you want to assure that the table exists (ie at install). + * @param tableName name of the table + * @param force if true, force a connection by fetching a non-existant key + * @return table handle + * @throws IOException + */ + HTableInterface getHBaseTable(String tableName, boolean force) throws IOException; + +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java index 7b0cf95..1f336db 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java @@ -19,18 +19,13 @@ package org.apache.hadoop.hive.metastore.hbase; import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.codec.binary.Base64; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -53,18 +48,13 @@ import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.io.Writable; -import java.io.DataInput; -import java.io.DataOutput; import java.io.IOException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; -import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Deque; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -78,13 +68,13 @@ */ class HBaseReadWrite { - @VisibleForTesting final static String DB_TABLE = "DBS"; - @VisibleForTesting final static String GLOBAL_PRIVS_TABLE = "GLOBAL_PRIVS"; - @VisibleForTesting final static String PART_TABLE = "PARTITIONS"; - @VisibleForTesting final static String ROLE_TABLE = "ROLES"; - @VisibleForTesting final static String SD_TABLE = "SDS"; - @VisibleForTesting final static String TABLE_TABLE = "TBLS"; - @VisibleForTesting final static String 
USER_TO_ROLE_TABLE = "USER_TO_ROLE"; + @VisibleForTesting final static String DB_TABLE = "HBMS_DBS"; + @VisibleForTesting final static String GLOBAL_PRIVS_TABLE = "HBMS_GLOBAL_PRIVS"; + @VisibleForTesting final static String PART_TABLE = "HBMS_PARTITIONS"; + @VisibleForTesting final static String ROLE_TABLE = "HBMS_ROLES"; + @VisibleForTesting final static String SD_TABLE = "HBMS_SDS"; + @VisibleForTesting final static String TABLE_TABLE = "HBMS_TBLS"; + @VisibleForTesting final static String USER_TO_ROLE_TABLE = "HBMS_USER_TO_ROLE"; @VisibleForTesting final static byte[] CATALOG_CF = "c".getBytes(HBaseUtils.ENCODING); @VisibleForTesting final static byte[] STATS_CF = "s".getBytes(HBaseUtils.ENCODING); @VisibleForTesting final static String NO_CACHE_CONF = "no.use.cache"; @@ -94,6 +84,8 @@ private final static byte[] GLOBAL_PRIVS_KEY = "globalprivs".getBytes(HBaseUtils.ENCODING); private final static int TABLES_TO_CACHE = 10; + @VisibleForTesting final static String TEST_CONN = "test_connection"; + private final static String[] tableNames = { DB_TABLE, GLOBAL_PRIVS_TABLE, PART_TABLE, USER_TO_ROLE_TABLE, ROLE_TABLE, SD_TABLE, TABLE_TABLE }; static final private Log LOG = LogFactory.getLog(HBaseReadWrite.class.getName()); @@ -112,8 +104,7 @@ protected HBaseReadWrite initialValue() { private static Configuration staticConf = null; private Configuration conf; - private HConnection conn; - private Map tables; + private HBaseConnection conn; private MessageDigest md; private ObjectCache, Table> tableCache; private ObjectCache sdCache; @@ -132,7 +123,7 @@ protected HBaseReadWrite initialValue() { // roleCache doesn't use ObjectCache because I don't want to limit the size. I am assuming // that the number of roles will always be small (< 100) so caching the whole thing should not // be painful. - private Map roleCache; + private Map roleCache; boolean entireRoleTableInCache; /** @@ -161,11 +152,21 @@ private HBaseReadWrite(Configuration configuration) { HBaseConfiguration.addHbaseResources(conf); try { - conn = HConnectionManager.createConnection(conf); - } catch (IOException e) { + String connClass = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS); + if (!TEST_CONN.equals(connClass)) { + Class c = Class.forName(connClass); + Object o = c.newInstance(); + if (HBaseConnection.class.isAssignableFrom(o.getClass())) { + conn = (HBaseConnection) o; + } else { + throw new IOException(connClass + " is not an instance of HBaseConnection."); + } + conn.setConf(conf); + conn.connect(); + } + } catch (Exception e) { throw new RuntimeException(e); } - tables = new HashMap(); try { md = MessageDigest.getInstance("MD5"); @@ -213,27 +214,23 @@ private HBaseReadWrite(Configuration configuration) { statsCache = StatsCache.getInstance(conf); } - roleCache = new HashMap(); + roleCache = new HashMap(); entireRoleTableInCache = false; } // Synchronize this so not everyone's doing it at once. 
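Note on the reflective loading just above: HBaseReadWrite now reads METASTORE_HBASE_CONNECTION_CLASS, instantiates the named class, verifies that it implements HBaseConnection, and only then calls setConf() followed by connect(); when the configured name equals TEST_CONN ("test_connection") the constructor skips this entirely so test code can inject its own connection. The sketch below shows roughly what a minimal, non-transactional implementation of the new interface could look like on top of the stock HBase 0.98 client. It is illustrative only: the class name ExampleHBaseConnection is invented, the List<byte[]> element type for column families is an assumption, and the transaction methods are deliberately no-ops.

package org.apache.hadoop.hive.metastore.hbase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;

import java.io.IOException;
import java.util.List;

// Illustrative sketch, not part of the patch: a plain, non-transactional connection.
public class ExampleHBaseConnection implements HBaseConnection {
  private Configuration conf;
  private HConnection conn;

  @Override public void setConf(Configuration c) { conf = c; }
  @Override public Configuration getConf() { return conf; }

  @Override public void connect() throws IOException {
    conn = HConnectionManager.createConnection(conf);
  }

  @Override public void close() throws IOException { conn.close(); }

  // No transactional layer behind this connection, so these are no-ops.
  @Override public void beginTransaction() throws IOException { }
  @Override public void commitTransaction() throws IOException { }
  @Override public void rollbackTransaction() throws IOException { }

  @Override public void createHBaseTable(String tableName, List<byte[]> columnFamilies)
      throws IOException {
    HBaseAdmin admin = new HBaseAdmin(conn);
    try {
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
      for (byte[] cf : columnFamilies) desc.addFamily(new HColumnDescriptor(cf));
      admin.createTable(desc);
    } finally {
      admin.close();
    }
  }

  @Override public HTableInterface getHBaseTable(String tableName) throws IOException {
    return getHBaseTable(tableName, false);
  }

  @Override public HTableInterface getHBaseTable(String tableName, boolean force)
      throws IOException {
    HTableInterface htab = conn.getTable(tableName);
    if (force) {
      try {
        // getTable() is lazy; touch the region server so a missing table is detected now.
        htab.get(new Get("nosuchkey".getBytes()));
      } catch (TableNotFoundException e) {
        return null;
      }
    }
    return htab;
  }
}
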
static synchronized void createTablesIfNotExist() throws IOException { if (!tablesCreated) { - HBaseAdmin admin = new HBaseAdmin(self.get().conn); for (String name : tableNames) { - if (self.get().getHTable(name) == null) { - LOG.info("Creating HBase table " + name); - HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(name)); - tableDesc.addFamily(new HColumnDescriptor(CATALOG_CF)); - // Only table and partitions need stats + if (self.get().conn.getHBaseTable(name, true) == null) { + List columnFamilies = new ArrayList(); + columnFamilies.add(CATALOG_CF); if (TABLE_TABLE.equals(name) || PART_TABLE.equals(name)) { - tableDesc.addFamily(new HColumnDescriptor(STATS_CF)); + columnFamilies.add(STATS_CF); } - admin.createTable(tableDesc); + self.get().conn.createHBaseTable(name, columnFamilies); } } - admin.close(); tablesCreated = true; } } @@ -246,22 +243,33 @@ static synchronized void createTablesIfNotExist() throws IOException { * Begin a transaction */ void begin() { - // NOP for now + try { + conn.beginTransaction(); + } catch (IOException e) { + throw new RuntimeException(e); + } } /** * Commit a transaction */ void commit() { - // NOP for now + try { + conn.commitTransaction(); + } catch (IOException e) { + throw new RuntimeException(e); + } } void rollback() { - // NOP for now + try { + conn.rollbackTransaction(); + } catch (IOException e) { + throw new RuntimeException(e); + } } void close() throws IOException { - for (HTableInterface htab : tables.values()) htab.close(); conn.close(); } @@ -279,9 +287,7 @@ Database getDb(String name) throws IOException { byte[] key = HBaseUtils.buildKey(name); byte[] serialized = read(DB_TABLE, key, CATALOG_CF, CATALOG_COL); if (serialized == null) return null; - DatabaseWritable db = new DatabaseWritable(); - HBaseUtils.deserialize(db, serialized); - return db.db; + return HBaseUtils.deserializeDatabase(name, serialized); } /** @@ -300,9 +306,9 @@ Database getDb(String name) throws IOException { scanWithFilter(DB_TABLE, null, CATALOG_CF, CATALOG_COL, filter); List databases = new ArrayList(); while (iter.hasNext()) { - DatabaseWritable db = new DatabaseWritable(); - HBaseUtils.deserialize(db, iter.next().getValue(CATALOG_CF, CATALOG_COL)); - databases.add(db.db); + Result result = iter.next(); + databases.add(HBaseUtils.deserializeDatabase(result.getRow(), + result.getValue(CATALOG_CF, CATALOG_COL))); } return databases; } @@ -313,11 +319,8 @@ Database getDb(String name) throws IOException { * @throws IOException */ void putDb(Database database) throws IOException { - DatabaseWritable db = new DatabaseWritable(database); - byte[] key = HBaseUtils.buildKey(db.db.getName()); - byte[] serialized = HBaseUtils.serialize(db); - store(DB_TABLE, key, CATALOG_CF, CATALOG_COL, serialized); - flush(); + byte[][] serialized = HBaseUtils.serializeDatabase(database); + store(DB_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); } /** @@ -328,7 +331,6 @@ void putDb(Database database) throws IOException { void deleteDb(String name) throws IOException { byte[] key = HBaseUtils.buildKey(name); delete(DB_TABLE, key, null, null); - flush(); } /********************************************************************************************** @@ -344,7 +346,7 @@ PrincipalPrivilegeSet getGlobalPrivs() throws IOException { byte[] key = GLOBAL_PRIVS_KEY; byte[] serialized = read(GLOBAL_PRIVS_TABLE, key, CATALOG_CF, CATALOG_COL); if (serialized == null) return null; - return HBaseUtils.readPrivileges(serialized); + return 
HBaseUtils.deserializePrincipalPrivilegeSet(serialized); } /** @@ -353,9 +355,8 @@ PrincipalPrivilegeSet getGlobalPrivs() throws IOException { */ void putGlobalPrivs(PrincipalPrivilegeSet privs) throws IOException { byte[] key = GLOBAL_PRIVS_KEY; - byte[] serialized = HBaseUtils.writePrivileges(privs); + byte[] serialized = HBaseUtils.serializePrincipalPrivilegeSet(privs); store(GLOBAL_PRIVS_TABLE, key, CATALOG_CF, CATALOG_COL, serialized); - flush(); } /********************************************************************************************** @@ -377,37 +378,118 @@ Partition getPartition(String dbName, String tableName, List partVals) } /** - * Add a partition + * Get a set of specific partitions. This cannot be used to do a scan, each partition must be + * completely specified. This does not use the partition cache. + * @param dbName database table is in + * @param tableName table partitions are in + * @param partValLists list of list of values, each list should uniquely identify one partition + * @return a list of partition objects. + * @throws IOException + */ + List getPartitions(String dbName, String tableName, List> partValLists) + throws IOException { + List parts = new ArrayList(partValLists.size()); + List gets = new ArrayList(partValLists.size()); + for (List partVals : partValLists) { + byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partVals); + Get get = new Get(key); + get.addColumn(CATALOG_CF, CATALOG_COL); + gets.add(get); + } + HTableInterface htab = conn.getHBaseTable(PART_TABLE); + Result[] results = htab.get(gets); + for (int i = 0; i < results.length; i++) { + HBaseUtils.StorageDescriptorParts sdParts = + HBaseUtils.deserializePartition(dbName, tableName, partValLists.get(i), + results[i].getValue(CATALOG_CF, CATALOG_COL)); + StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); + HBaseUtils.assembleStorageDescriptor(sd, sdParts); + parts.add(sdParts.containingPartition); + } + + return parts; + } + + /** + * Add a partition. This should only be called for new partitions. For altering existing + * partitions this should not be called as it will blindly increment the ref counter for the + * storage descriptor. * @param partition partition object to add * @throws IOException */ void putPartition(Partition partition) throws IOException { - PartitionWritable part = new PartitionWritable(partition); - byte[] key = buildPartitionKey(part); - byte[] serialized = HBaseUtils.serialize(part); - store(PART_TABLE, key, CATALOG_CF, CATALOG_COL, serialized); - flush(); + byte[] hash = putStorageDescriptor(partition.getSd()); + byte[][] serialized = HBaseUtils.serializePartition(partition, hash); + store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); partCache.put(partition.getDbName(), partition.getTableName(), partition); } /** - * Add a group of partitions + * Replace an existing partition. 
+ * @param oldPart partition to be replaced + * @param newPart partitiion to replace it with + * @throws IOException + */ + void replacePartition(Partition oldPart, Partition newPart) throws IOException { + byte[] hash; + byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldPart.getSd(), md); + byte[] newHash = HBaseUtils.hashStorageDescriptor(newPart.getSd(), md); + if (Arrays.equals(oldHash, newHash)) { + hash = oldHash; + } else { + decrementStorageDescriptorRefCount(oldPart.getSd()); + hash = putStorageDescriptor(newPart.getSd()); + } + byte[][] serialized = HBaseUtils.serializePartition(newPart, hash); + store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); + partCache.put(newPart.getDbName(), newPart.getTableName(), newPart); + } + + /** + * Add a group of partitions. This should only be used when all partitions are new. It + * blindly increments the ref count on the storage descriptor. * @param partitions list of partitions to add * @throws IOException */ void putPartitions(List partitions) throws IOException { List puts = new ArrayList(partitions.size()); for (Partition partition : partitions) { - PartitionWritable part = new PartitionWritable(partition); - byte[] key = buildPartitionKey(part); - byte[] serialized = HBaseUtils.serialize(part); - Put p = new Put(key); - p.add(CATALOG_CF, CATALOG_COL, serialized); + byte[] hash = putStorageDescriptor(partition.getSd()); + byte[][] serialized = HBaseUtils.serializePartition(partition, hash); + Put p = new Put(serialized[0]); + p.add(CATALOG_CF, CATALOG_COL, serialized[1]); puts.add(p); partCache.put(partition.getDbName(), partition.getTableName(), partition); } - getHTable(PART_TABLE).put(puts); - flush(); + HTableInterface htab = conn.getHBaseTable(PART_TABLE); + htab.put(puts); + htab.flushCommits(); + } + + void replacePartitions(List oldParts, List newParts) throws IOException { + if (oldParts.size() != newParts.size()) { + throw new RuntimeException("Number of old and new partitions must match."); + } + List puts = new ArrayList(newParts.size()); + for (int i = 0; i < newParts.size(); i++) { + byte[] hash; + byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldParts.get(i).getSd(), md); + byte[] newHash = HBaseUtils.hashStorageDescriptor(newParts.get(i).getSd(), md); + if (Arrays.equals(oldHash, newHash)) { + hash = oldHash; + } else { + decrementStorageDescriptorRefCount(oldParts.get(i).getSd()); + hash = putStorageDescriptor(newParts.get(i).getSd()); + } + byte[][] serialized = HBaseUtils.serializePartition(newParts.get(i), hash); + Put p = new Put(serialized[0]); + p.add(CATALOG_CF, CATALOG_COL, serialized[1]); + puts.add(p); + partCache.put(newParts.get(i).getDbName(), newParts.get(i).getTableName(), newParts.get(i)); + } + HTableInterface htab = conn.getHBaseTable(PART_TABLE); + htab.put(puts); + htab.flushCommits(); } /** @@ -428,7 +510,7 @@ void putPartitions(List partitions) throws IOException { : new ArrayList(cached); } byte[] keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName, tableName); - List parts = scanPartitions(keyPrefix, CATALOG_CF, CATALOG_COL, -1); + List parts = scanPartitions(keyPrefix, -1); partCache.put(dbName, tableName, parts, true); return maxPartitions < parts.size() ? 
parts.subList(0, maxPartitions) : parts; } @@ -514,8 +596,7 @@ void putPartitions(List partitions) throws IOException { regex + ">"); } - List parts = - scanPartitionsWithFilter(keyPrefix, CATALOG_CF, CATALOG_COL, maxPartitions, filter); + List parts = scanPartitionsWithFilter(keyPrefix, maxPartitions, filter); partCache.put(dbName, tableName, parts, false); return parts; } @@ -533,58 +614,47 @@ void deletePartition(String dbName, String tableName, List partVals) thr partCache.remove(dbName, tableName, partVals); Partition p = getPartition(dbName, tableName, partVals, false); decrementStorageDescriptorRefCount(p.getSd()); - byte[] key = buildPartitionKey(dbName, tableName, partVals); + byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partVals); delete(PART_TABLE, key, null, null); - flush(); } private Partition getPartition(String dbName, String tableName, List partVals, boolean populateCache) throws IOException { Partition cached = partCache.get(dbName, tableName, partVals); if (cached != null) return cached; - byte[] key = buildPartitionKey(dbName, tableName, partVals); + byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partVals); byte[] serialized = read(PART_TABLE, key, CATALOG_CF, CATALOG_COL); if (serialized == null) return null; - PartitionWritable part = new PartitionWritable(); - HBaseUtils.deserialize(part, serialized); - if (populateCache) partCache.put(dbName, tableName, part.part); - return part.part; + HBaseUtils.StorageDescriptorParts sdParts = + HBaseUtils.deserializePartition(dbName, tableName, partVals, serialized); + StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); + HBaseUtils.assembleStorageDescriptor(sd, sdParts); + if (populateCache) partCache.put(dbName, tableName, sdParts.containingPartition); + return sdParts.containingPartition; } - private List scanPartitions(byte[] keyPrefix, byte[] colFam, byte[] colName, - int maxResults) throws IOException { - return scanPartitionsWithFilter(keyPrefix, colFam, colName, maxResults, null); + private List scanPartitions(byte[] keyPrefix, int maxResults) throws IOException { + return scanPartitionsWithFilter(keyPrefix, maxResults, null); } - private List scanPartitionsWithFilter(byte[] keyPrefix, byte[] colFam, byte[] colName, - int maxResults, Filter filter) + private List scanPartitionsWithFilter(byte[] keyPrefix, int maxResults, Filter filter) throws IOException { Iterator iter = - scanWithFilter(PART_TABLE, keyPrefix, colFam, colName, filter); + scanWithFilter(PART_TABLE, keyPrefix, CATALOG_CF, CATALOG_COL, filter); List parts = new ArrayList(); int numToFetch = maxResults < 0 ? 
Integer.MAX_VALUE : maxResults; for (int i = 0; i < numToFetch && iter.hasNext(); i++) { - PartitionWritable p = new PartitionWritable(); - HBaseUtils.deserialize(p, iter.next().getValue(colFam, colName)); - parts.add(p.part); + Result result = iter.next(); + HBaseUtils.StorageDescriptorParts sdParts = + HBaseUtils.deserializePartition(result.getRow(), result.getValue(CATALOG_CF, CATALOG_COL)); + StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); + HBaseUtils.assembleStorageDescriptor(sd, sdParts); + parts.add(sdParts.containingPartition); } return parts; } - private byte[] buildPartitionKey(String dbName, String tableName, List partVals) { - Deque keyParts = new ArrayDeque(partVals); - keyParts.addFirst(tableName); - keyParts.addFirst(dbName); - return HBaseUtils.buildKey(keyParts.toArray(new String[keyParts.size()])); - } - - private byte[] buildPartitionKey(PartitionWritable part) throws IOException { - Deque keyParts = new ArrayDeque(part.part.getValues()); - keyParts.addFirst(part.part.getTableName()); - keyParts.addFirst(part.part.getDbName()); - return HBaseUtils.buildKey(keyParts.toArray(new String[keyParts.size()])); - } /********************************************************************************************** * Role related methods @@ -600,9 +670,7 @@ private Partition getPartition(String dbName, String tableName, List par byte[] key = HBaseUtils.buildKey(userName); byte[] serialized = read(USER_TO_ROLE_TABLE, key, CATALOG_CF, CATALOG_COL); if (serialized == null) return null; - RoleList roles = new RoleList(); - HBaseUtils.deserialize(roles, serialized); - return roles.roles; + return HBaseUtils.deserializeRoleList(serialized); } /** @@ -617,9 +685,10 @@ private Partition getPartition(String dbName, String tableName, List par buildRoleCache(); Set rolesFound = new HashSet(); - for (Map.Entry e : roleCache.entrySet()) { - for (GrantInfoWritable giw : e.getValue().grantInfos) { - if (giw.principalType == type && giw.principalName.equals(name)) { + for (Map.Entry e : roleCache.entrySet()) { + for (HbaseMetastoreProto.RoleGrantInfo giw : e.getValue().getGrantInfoList()) { + if (HBaseUtils.convertPrincipalTypes(giw.getPrincipalType()) == type && + giw.getPrincipalName().equals(name)) { rolesFound.add(e.getKey()); break; } @@ -627,7 +696,7 @@ private Partition getPartition(String dbName, String tableName, List par } List directRoles = new ArrayList(rolesFound.size()); List gets = new ArrayList(); - HTableInterface htab = getHTable(ROLE_TABLE); + HTableInterface htab = conn.getHBaseTable(ROLE_TABLE); for (String roleFound : rolesFound) { byte[] key = HBaseUtils.buildKey(roleFound); Get g = new Get(key); @@ -639,9 +708,7 @@ private Partition getPartition(String dbName, String tableName, List par for (int i = 0; i < results.length; i++) { byte[] serialized = results[i].getValue(CATALOG_CF, CATALOG_COL); if (serialized != null) { - RoleWritable role = new RoleWritable(); - HBaseUtils.deserialize(role, serialized); - directRoles.add(role.role); + directRoles.add(HBaseUtils.deserializeRole(results[i].getRow(), serialized)); } } @@ -654,14 +721,14 @@ private Partition getPartition(String dbName, String tableName, List par * @return a list of all roles included in this role * @throws IOException */ - GrantInfoList getRolePrincipals(String roleName) throws IOException, NoSuchObjectException { - GrantInfoList rolePrincipals = roleCache.get(roleName); + HbaseMetastoreProto.RoleGrantInfoList getRolePrincipals(String roleName) + throws IOException, NoSuchObjectException { + 
HbaseMetastoreProto.RoleGrantInfoList rolePrincipals = roleCache.get(roleName); if (rolePrincipals != null) return rolePrincipals; byte[] key = HBaseUtils.buildKey(roleName); byte[] serialized = read(ROLE_TABLE, key, CATALOG_CF, ROLES_COL); if (serialized == null) return null; - rolePrincipals = new GrantInfoList(); - HBaseUtils.deserialize(rolePrincipals, serialized); + rolePrincipals = HbaseMetastoreProto.RoleGrantInfoList.parseFrom(serialized); roleCache.put(roleName, rolePrincipals); return rolePrincipals; } @@ -673,17 +740,16 @@ GrantInfoList getRolePrincipals(String roleName) throws IOException, NoSuchObjec * @param roleName name of the role * @return set of all users in the role * @throws IOException - * @throws NoSuchObjectException */ Set findAllUsersInRole(String roleName) throws IOException { // Walk the userToRole table and collect every user that matches this role. Set users = new HashSet(); Iterator iter = scanWithFilter(USER_TO_ROLE_TABLE, null, CATALOG_CF, CATALOG_COL, null); while (iter.hasNext()) { - RoleList roleList = new RoleList(); Result result = iter.next(); - HBaseUtils.deserialize(roleList, result.getValue(CATALOG_CF, CATALOG_COL)); - for (String rn : roleList.roles) { + List roleList = + HBaseUtils.deserializeRoleList(result.getValue(CATALOG_CF, CATALOG_COL)); + for (String rn : roleList) { if (rn.equals(roleName)) { users.add(new String(result.getRow(), HBaseUtils.ENCODING)); break; @@ -701,19 +767,22 @@ GrantInfoList getRolePrincipals(String roleName) throws IOException, NoSuchObjec * @throws NoSuchObjectException * */ - void addPrincipalToRole(String roleName, GrantInfoWritable grantInfo) + void addPrincipalToRole(String roleName, HbaseMetastoreProto.RoleGrantInfo grantInfo) throws IOException, NoSuchObjectException { - GrantInfoList rolePrincipals = getRolePrincipals(roleName); - if (rolePrincipals == null) { - // Happens the first time a principal is added to a role - rolePrincipals = new GrantInfoList(); + HbaseMetastoreProto.RoleGrantInfoList proto = getRolePrincipals(roleName); + List rolePrincipals = + new ArrayList(); + if (proto != null) { + rolePrincipals.addAll(proto.getGrantInfoList()); } - rolePrincipals.grantInfos.add(grantInfo); + + rolePrincipals.add(grantInfo); + proto = HbaseMetastoreProto.RoleGrantInfoList.newBuilder() + .addAllGrantInfo(rolePrincipals) + .build(); byte[] key = HBaseUtils.buildKey(roleName); - byte[] serialized = HBaseUtils.serialize(rolePrincipals); - store(ROLE_TABLE, key, CATALOG_CF, ROLES_COL, serialized); - flush(); - roleCache.put(roleName, rolePrincipals); + store(ROLE_TABLE, key, CATALOG_CF, ROLES_COL, proto.toByteArray()); + roleCache.put(roleName, proto); } /** @@ -729,24 +798,32 @@ void addPrincipalToRole(String roleName, GrantInfoWritable grantInfo) void dropPrincipalFromRole(String roleName, String principalName, PrincipalType type, boolean grantOnly) throws NoSuchObjectException, IOException { - GrantInfoList rolePrincipals = getRolePrincipals(roleName); - if (rolePrincipals == null) { - // Means there aren't any principals in this role, so probably not a problem. 
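Aside on the proto-based role handling replacing the Writable code here: because the generated messages are immutable, addPrincipalToRole and dropPrincipalFromRole both follow the same pattern of parsing (or taking from the role cache) the current RoleGrantInfoList, copying its entries into a plain list, adjusting that list, and rebuilding and re-serializing a fresh message. A small compilable sketch of that round trip follows; the setter names assume standard protobuf codegen for the fields declared in the descriptor above rather than being quoted from the patch, and the principal names are invented.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

public class RoleGrantRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build one grant entry; "fred" and "admin" are made-up example principals.
    HbaseMetastoreProto.RoleGrantInfo grant = HbaseMetastoreProto.RoleGrantInfo.newBuilder()
        .setPrincipalName("fred")
        .setPrincipalType(HbaseMetastoreProto.PrincipalType.USER)
        .setAddTime((int) (System.currentTimeMillis() / 1000))
        .setGrantor("admin")
        .setGrantorType(HbaseMetastoreProto.PrincipalType.ROLE)
        .setGrantOption(false)
        .build();

    // Serialize the list the same way it is written into the ROLES_COL cell.
    byte[] cell = HbaseMetastoreProto.RoleGrantInfoList.newBuilder()
        .addGrantInfo(grant)
        .build()
        .toByteArray();

    // Messages are immutable, so editing means: parse, copy out, modify, rebuild.
    HbaseMetastoreProto.RoleGrantInfoList parsed =
        HbaseMetastoreProto.RoleGrantInfoList.parseFrom(cell);
    List<HbaseMetastoreProto.RoleGrantInfo> copy =
        new ArrayList<HbaseMetastoreProto.RoleGrantInfo>(parsed.getGrantInfoList());
    copy.remove(0);                                    // e.g. drop the principal again
    byte[] rewritten = HbaseMetastoreProto.RoleGrantInfoList.newBuilder()
        .addAllGrantInfo(copy)
        .build()
        .toByteArray();
    System.out.println(rewritten.length);              // an empty list serializes to 0 bytes
  }
}
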
- return; - } - for (int i = 0; i < rolePrincipals.grantInfos.size(); i++) { - if (rolePrincipals.grantInfos.get(i).principalType == type && - rolePrincipals.grantInfos.get(i).principalName.equals(principalName)) { - if (grantOnly) rolePrincipals.grantInfos.get(i).grantOption = false; - else rolePrincipals.grantInfos.remove(i); + HbaseMetastoreProto.RoleGrantInfoList proto = getRolePrincipals(roleName); + if (proto == null) return; + List rolePrincipals = + new ArrayList(); + rolePrincipals.addAll(proto.getGrantInfoList()); + + for (int i = 0; i < rolePrincipals.size(); i++) { + if (HBaseUtils.convertPrincipalTypes(rolePrincipals.get(i).getPrincipalType()) == type && + rolePrincipals.get(i).getPrincipalName().equals(principalName)) { + if (grantOnly) { + rolePrincipals.set(i, + HbaseMetastoreProto.RoleGrantInfo.newBuilder(rolePrincipals.get(i)) + .setGrantOption(false) + .build()); + } else { + rolePrincipals.remove(i); + } break; } } byte[] key = HBaseUtils.buildKey(roleName); - byte[] serialized = HBaseUtils.serialize(rolePrincipals); - store(ROLE_TABLE, key, CATALOG_CF, ROLES_COL, serialized); - flush(); - roleCache.put(roleName, rolePrincipals); + proto = HbaseMetastoreProto.RoleGrantInfoList.newBuilder() + .addAllGrantInfo(rolePrincipals) + .build(); + store(ROLE_TABLE, key, CATALOG_CF, ROLES_COL, proto.toByteArray()); + roleCache.put(roleName, proto); } /** @@ -763,12 +840,11 @@ void buildRoleMapForUser(String userName) throws IOException, NoSuchObjectExcept // Second, find every role the user participates in directly. Set rolesToAdd = new HashSet(); - Set userSet = new HashSet(); Set rolesToCheckNext = new HashSet(); - userSet.add(userName); - for (Map.Entry e : roleCache.entrySet()) { - for (GrantInfoWritable grantInfo : e.getValue().grantInfos) { - if (grantInfo.principalType == PrincipalType.USER && userName.equals(grantInfo.principalName)) { + for (Map.Entry e : roleCache.entrySet()) { + for (HbaseMetastoreProto.RoleGrantInfo grantInfo : e.getValue().getGrantInfoList()) { + if (HBaseUtils.convertPrincipalTypes(grantInfo.getPrincipalType()) == PrincipalType.USER && + userName .equals(grantInfo.getPrincipalName())) { rolesToAdd.add(e.getKey()); rolesToCheckNext.add(e.getKey()); LOG.debug("Adding " + e.getKey() + " to list of roles user is in directly"); @@ -782,13 +858,13 @@ void buildRoleMapForUser(String userName) throws IOException, NoSuchObjectExcept while (rolesToCheckNext.size() > 0) { Set tmpRolesToCheckNext = new HashSet(); for (String roleName : rolesToCheckNext) { - GrantInfoList grantInfos = roleCache.get(roleName); + HbaseMetastoreProto.RoleGrantInfoList grantInfos = roleCache.get(roleName); if (grantInfos == null) continue; // happens when a role contains no grants - for (GrantInfoWritable grantInfo : grantInfos.grantInfos) { - if (grantInfo.principalType == PrincipalType.ROLE && - rolesToAdd.add(grantInfo.principalName)) { - tmpRolesToCheckNext.add(grantInfo.principalName); - LOG.debug("Adding " + grantInfo.principalName + + for (HbaseMetastoreProto.RoleGrantInfo grantInfo : grantInfos.getGrantInfoList()) { + if (HBaseUtils.convertPrincipalTypes(grantInfo.getPrincipalType()) == PrincipalType.ROLE && + rolesToAdd.add(grantInfo.getPrincipalName())) { + tmpRolesToCheckNext.add(grantInfo.getPrincipalName()); + LOG.debug("Adding " + grantInfo.getPrincipalName() + " to list of roles user is in indirectly"); } } @@ -797,14 +873,13 @@ void buildRoleMapForUser(String userName) throws IOException, NoSuchObjectExcept } byte[] key = HBaseUtils.buildKey(userName); - byte[] 
serialized = HBaseUtils.serialize(new RoleList(new ArrayList(rolesToAdd))); + byte[] serialized = HBaseUtils.serializeRoleList(new ArrayList(rolesToAdd)); store(USER_TO_ROLE_TABLE, key, CATALOG_CF, CATALOG_COL, serialized); - flush(); } /** * Remove all of the grants for a role. This is not cheap. - * @param roleName + * @param roleName Role to remove from all other roles and grants * @throws IOException */ void removeRoleGrants(String roleName) throws IOException { @@ -812,27 +887,35 @@ void removeRoleGrants(String roleName) throws IOException { List puts = new ArrayList(); // First, walk the role table and remove any references to this role - for (Map.Entry e : roleCache.entrySet()) { + for (Map.Entry e : roleCache.entrySet()) { boolean madeAChange = false; - for (int i = 0; i < e.getValue().grantInfos.size(); i++) { - if (e.getValue().grantInfos.get(i).principalType == PrincipalType.ROLE && - e.getValue().grantInfos.get(i).principalName.equals(roleName)) { - e.getValue().grantInfos.remove(i); + List rgil = + new ArrayList(); + rgil.addAll(e.getValue().getGrantInfoList()); + for (int i = 0; i < rgil.size(); i++) { + if (HBaseUtils.convertPrincipalTypes(rgil.get(i).getPrincipalType()) == PrincipalType.ROLE && + rgil.get(i).getPrincipalName().equals(roleName)) { + rgil.remove(i); madeAChange = true; break; } } if (madeAChange) { Put put = new Put(HBaseUtils.buildKey(e.getKey())); - put.add(CATALOG_CF, ROLES_COL, HBaseUtils.serialize(e.getValue())); + HbaseMetastoreProto.RoleGrantInfoList proto = + HbaseMetastoreProto.RoleGrantInfoList.newBuilder() + .addAllGrantInfo(rgil) + .build(); + put.add(CATALOG_CF, ROLES_COL, proto.toByteArray()); puts.add(put); - roleCache.put(e.getKey(), e.getValue()); + roleCache.put(e.getKey(), proto); } } if (puts.size() > 0) { - HTableInterface htab = getHTable(ROLE_TABLE); + HTableInterface htab = conn.getHBaseTable(ROLE_TABLE); htab.put(puts); + htab.flushCommits(); } // Remove any global privileges held by this role @@ -851,15 +934,17 @@ void removeRoleGrants(String roleName) throws IOException { if (db.getPrivileges() != null && db.getPrivileges().getRolePrivileges() != null && db.getPrivileges().getRolePrivileges().remove(roleName) != null) { - Put put = new Put(HBaseUtils.buildKey(db.getName())); - put.add(CATALOG_CF, CATALOG_COL, HBaseUtils.serialize(new DatabaseWritable(db))); + byte[][] serialized = HBaseUtils.serializeDatabase(db); + Put put = new Put(serialized[0]); + put.add(CATALOG_CF, CATALOG_COL, serialized[1]); puts.add(put); } } if (puts.size() > 0) { - HTableInterface htab = getHTable(DB_TABLE); + HTableInterface htab = conn.getHBaseTable(DB_TABLE); htab.put(puts); + htab.flushCommits(); } // Finally, walk the table table @@ -871,8 +956,10 @@ void removeRoleGrants(String roleName) throws IOException { if (table.getPrivileges() != null && table.getPrivileges().getRolePrivileges() != null && table.getPrivileges().getRolePrivileges().remove(roleName) != null) { - Put put = new Put(HBaseUtils.buildKey(table.getDbName(), table.getTableName())); - put.add(CATALOG_CF, CATALOG_COL, HBaseUtils.serialize(new TableWritable(table))); + byte[][] serialized = HBaseUtils.serializeTable(table, + HBaseUtils.hashStorageDescriptor(table.getSd(), md)); + Put put = new Put(serialized[0]); + put.add(CATALOG_CF, CATALOG_COL, serialized[1]); puts.add(put); } } @@ -880,11 +967,10 @@ void removeRoleGrants(String roleName) throws IOException { } if (puts.size() > 0) { - HTableInterface htab = getHTable(TABLE_TABLE); + HTableInterface htab = 
conn.getHBaseTable(TABLE_TABLE); htab.put(puts); + htab.flushCommits(); } - - flush(); } /** @@ -897,9 +983,7 @@ Role getRole(String roleName) throws IOException { byte[] key = HBaseUtils.buildKey(roleName); byte[] serialized = read(ROLE_TABLE, key, CATALOG_CF, CATALOG_COL); if (serialized == null) return null; - RoleWritable role = new RoleWritable(); - HBaseUtils.deserialize(role, serialized); - return role.role; + return HBaseUtils.deserializeRole(roleName, serialized); } /** @@ -911,9 +995,9 @@ Role getRole(String roleName) throws IOException { Iterator iter = scanWithFilter(ROLE_TABLE, null, CATALOG_CF, CATALOG_COL, null); List roles = new ArrayList(); while (iter.hasNext()) { - RoleWritable role = new RoleWritable(); - HBaseUtils.deserialize(role, iter.next().getValue(CATALOG_CF, CATALOG_COL)); - roles.add(role.role); + Result result = iter.next(); + roles.add(HBaseUtils.deserializeRole(result.getRow(), + result.getValue(CATALOG_CF, CATALOG_COL))); } return roles; } @@ -924,10 +1008,8 @@ Role getRole(String roleName) throws IOException { * @throws IOException */ void putRole(Role role) throws IOException { - byte[] key = HBaseUtils.buildKey(role.getRoleName()); - byte[] serialized = HBaseUtils.serialize(new RoleWritable(role)); - store(ROLE_TABLE, key, CATALOG_CF, CATALOG_COL, serialized); - flush(); + byte[][] serialized = HBaseUtils.serializeRole(role); + store(ROLE_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); } /** @@ -938,39 +1020,17 @@ void putRole(Role role) throws IOException { void deleteRole(String roleName) throws IOException { byte[] key = HBaseUtils.buildKey(roleName); delete(ROLE_TABLE, key, null, null); - flush(); roleCache.remove(roleName); } - private static class RoleList implements Writable { - List roles; - - RoleList() { - } - - RoleList(List r) { - roles = r; - } - - @Override - public void write(DataOutput out) throws IOException { - HBaseUtils.writeStrList(out, roles); - } - - @Override - public void readFields(DataInput in) throws IOException { - roles = HBaseUtils.readStrList(in); - } - } - private void buildRoleCache() throws IOException { if (!entireRoleTableInCache) { Iterator roles = scanWithFilter(ROLE_TABLE, null, CATALOG_CF, ROLES_COL, null); while (roles.hasNext()) { Result res = roles.next(); String roleName = new String(res.getRow(), HBaseUtils.ENCODING); - GrantInfoList grantInfos = new GrantInfoList(); - HBaseUtils.deserialize(grantInfos, res.getValue(CATALOG_CF, ROLES_COL)); + HbaseMetastoreProto.RoleGrantInfoList grantInfos = + HbaseMetastoreProto.RoleGrantInfoList.parseFrom(res.getValue(CATALOG_CF, ROLES_COL)); roleCache.put(roleName, grantInfos); } entireRoleTableInCache = true; @@ -1016,7 +1076,7 @@ Table getTable(String dbName, String tableName) throws IOException { // Now build a single get that will fetch the remaining tables List gets = new ArrayList(); - HTableInterface htab = getHTable(TABLE_TABLE); + HTableInterface htab = conn.getHBaseTable(TABLE_TABLE); for (int i = 0; i < tableNames.size(); i++) { if (results.get(i) != null) continue; byte[] key = HBaseUtils.buildKey(dbName, tableNames.get(i)); @@ -1029,10 +1089,12 @@ Table getTable(String dbName, String tableName) throws IOException { if (results.get(i) != null) continue; byte[] serialized = res[nextGet++].getValue(CATALOG_CF, CATALOG_COL); if (serialized != null) { - TableWritable table = new TableWritable(); - HBaseUtils.deserialize(table, serialized); - tableCache.put(hashKeys[i], table.table); - results.set(i, table.table); + HBaseUtils.StorageDescriptorParts 
sdParts = + HBaseUtils.deserializeTable(dbName, tableNames.get(i), serialized); + StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); + HBaseUtils.assembleStorageDescriptor(sd, sdParts); + tableCache.put(hashKeys[i], sdParts.containingTable); + results.set(i, sdParts.containingTable); } } return results; @@ -1063,27 +1125,53 @@ Table getTable(String dbName, String tableName) throws IOException { scanWithFilter(TABLE_TABLE, keyPrefix, CATALOG_CF, CATALOG_COL, filter); List
<Table> tables = new ArrayList<Table>
(); while (iter.hasNext()) { - TableWritable table = new TableWritable(); - HBaseUtils.deserialize(table, iter.next().getValue(CATALOG_CF, CATALOG_COL)); - tables.add(table.table); + Result result = iter.next(); + HBaseUtils.StorageDescriptorParts sdParts = + HBaseUtils.deserializeTable(result.getRow(), result.getValue(CATALOG_CF, CATALOG_COL)); + StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); + HBaseUtils.assembleStorageDescriptor(sd, sdParts); + tables.add(sdParts.containingTable); } return tables; } /** - * Put a table object + * Put a table object. This should only be called when the table is new (create table) as it + * will blindly add/increment the storage descriptor. If you are altering an existing table + * call {@link #replaceTable} instead. * @param table table object * @throws IOException */ void putTable(Table table) throws IOException { - byte[] key = HBaseUtils.buildKey(table.getDbName(), table.getTableName()); - byte[] serialized = HBaseUtils.serialize(new TableWritable(table)); - store(TABLE_TABLE, key, CATALOG_CF, CATALOG_COL, serialized); - flush(); + byte[] hash = putStorageDescriptor(table.getSd()); + byte[][] serialized = HBaseUtils.serializeTable(table, hash); + store(TABLE_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); tableCache.put(new ObjectPair(table.getDbName(), table.getTableName()), table); } /** + * Replace an existing table. This will also compare the storage descriptors and see if the + * reference count needs to be adjusted + * @param oldTable old version of the table + * @param newTable new version of the table + */ + void replaceTable(Table oldTable, Table newTable) throws IOException { + byte[] hash; + byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldTable.getSd(), md); + byte[] newHash = HBaseUtils.hashStorageDescriptor(newTable.getSd(), md); + if (Arrays.equals(oldHash, newHash)) { + hash = oldHash; + } else { + decrementStorageDescriptorRefCount(oldTable.getSd()); + hash = putStorageDescriptor(newTable.getSd()); + } + byte[][] serialized = HBaseUtils.serializeTable(newTable, hash); + store(TABLE_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); + tableCache.put(new ObjectPair(newTable.getDbName(), newTable.getTableName()), + newTable); + } + + /** * Delete a table * @param dbName name of database table is in * @param tableName table to drop @@ -1096,7 +1184,6 @@ void deleteTable(String dbName, String tableName) throws IOException { decrementStorageDescriptorRefCount(t.getSd()); byte[] key = HBaseUtils.buildKey(dbName, tableName); delete(TABLE_TABLE, key, null, null); - flush(); } private Table getTable(String dbName, String tableName, boolean populateCache) @@ -1107,10 +1194,12 @@ private Table getTable(String dbName, String tableName, boolean populateCache) byte[] key = HBaseUtils.buildKey(dbName, tableName); byte[] serialized = read(TABLE_TABLE, key, CATALOG_CF, CATALOG_COL); if (serialized == null) return null; - TableWritable table = new TableWritable(); - HBaseUtils.deserialize(table, serialized); - if (populateCache) tableCache.put(hashKey, table.table); - return table.table; + HBaseUtils.StorageDescriptorParts sdParts = + HBaseUtils.deserializeTable(dbName, tableName, serialized); + StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); + HBaseUtils.assembleStorageDescriptor(sd, sdParts); + if (populateCache) tableCache.put(hashKey, sdParts.containingTable); + return sdParts.containingTable; } /********************************************************************************************** @@ 
-1120,21 +1209,21 @@ private Table getTable(String dbName, String tableName, boolean populateCache) /** * If this serde has already been read, then return it from the cache. If not, read it, then * return it. - * @param hash - * @return + * @param hash hash of the storage descriptor to read + * @return the storage descriptor * @throws IOException */ StorageDescriptor getStorageDescriptor(byte[] hash) throws IOException { ByteArrayWrapper hashKey = new ByteArrayWrapper(hash); StorageDescriptor cached = sdCache.get(hashKey); if (cached != null) return cached; + LOG.debug("Not found in cache, looking in hbase"); byte[] serialized = read(SD_TABLE, hash, CATALOG_CF, CATALOG_COL); if (serialized == null) { throw new RuntimeException("Woh, bad! Trying to fetch a non-existent storage descriptor " + - "from hash " + hash); + "from hash " + Base64.encodeBase64String(hash)); } - StorageDescriptor sd = new StorageDescriptor(); - HBaseUtils.deserializeStorageDescriptor(sd, serialized); + StorageDescriptor sd = HBaseUtils.deserializeStorageDescriptor(serialized); sdCache.put(hashKey, sd); return sd; } @@ -1146,64 +1235,59 @@ StorageDescriptor getStorageDescriptor(byte[] hash) throws IOException { * @throws IOException */ void decrementStorageDescriptorRefCount(StorageDescriptor sd) throws IOException { - byte[] serialized = HBaseUtils.serializeStorageDescriptor(sd); - byte[] key = hash(serialized); - for (int i = 0; i < 10; i++) { - byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL); - if (serializedRefCnt == null) { - // Someone deleted it before we got to it, no worries - return; - } - int refCnt = Integer.valueOf(new String(serializedRefCnt, HBaseUtils.ENCODING)); - HTableInterface htab = getHTable(SD_TABLE); - if (refCnt-- < 1) { - Delete d = new Delete(key); - if (htab.checkAndDelete(key, CATALOG_CF, REF_COUNT_COL, serializedRefCnt, d)) { - sdCache.remove(new ByteArrayWrapper(key)); - return; - } - } else { - Put p = new Put(key); - p.add(CATALOG_CF, REF_COUNT_COL, Integer.toString(refCnt).getBytes(HBaseUtils.ENCODING)); - if (htab.checkAndPut(key, CATALOG_CF, REF_COUNT_COL, serializedRefCnt, p)) { - return; - } - } + byte[] key = HBaseUtils.hashStorageDescriptor(sd, md); + byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL); + if (serializedRefCnt == null) { + // Someone deleted it before we got to it, no worries + return; + } + int refCnt = Integer.valueOf(new String(serializedRefCnt, HBaseUtils.ENCODING)); + HTableInterface htab = conn.getHBaseTable(SD_TABLE); + if (--refCnt < 1) { + Delete d = new Delete(key); + // We don't use checkAndDelete here because it isn't compatible with the transaction + // managers. If the transaction managers are doing their jobs then we should not need it + // anyway. + htab.delete(d); + sdCache.remove(new ByteArrayWrapper(key)); + } else { + Put p = new Put(key); + p.add(CATALOG_CF, REF_COUNT_COL, Integer.toString(refCnt).getBytes(HBaseUtils.ENCODING)); + htab.put(p); + htab.flushCommits(); } - throw new IOException("Too many unsuccessful attepts to decrement storage counter"); } /** - * Place the common parts of a storage descriptor into the cache. + * Place the common parts of a storage descriptor into the cache and write the storage + * descriptor out to HBase. This should only be called if you are sure that the storage + * descriptor needs to be added. 
If you have changed a table or partition but not it's storage + * descriptor do not call this method, as it will increment the reference count of the storage + * descriptor. * @param storageDescriptor storage descriptor to store. * @return id of the entry in the cache, to be written in for the storage descriptor */ byte[] putStorageDescriptor(StorageDescriptor storageDescriptor) throws IOException { byte[] sd = HBaseUtils.serializeStorageDescriptor(storageDescriptor); - byte[] key = hash(sd); - for (int i = 0; i < 10; i++) { - byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL); - HTableInterface htab = getHTable(SD_TABLE); - if (serializedRefCnt == null) { - // We are the first to put it in the DB - Put p = new Put(key); - p.add(CATALOG_CF, CATALOG_COL, sd); - p.add(CATALOG_CF, REF_COUNT_COL, "0".getBytes(HBaseUtils.ENCODING)); - if (htab.checkAndPut(key, CATALOG_CF, REF_COUNT_COL, null, p)) { - sdCache.put(new ByteArrayWrapper(key), storageDescriptor); - return key; - } - } else { - // Just increment the reference count - int refCnt = Integer.valueOf(new String(serializedRefCnt, HBaseUtils.ENCODING)) + 1; - Put p = new Put(key); - p.add(CATALOG_CF, REF_COUNT_COL, Integer.toString(refCnt).getBytes(HBaseUtils.ENCODING)); - if (htab.checkAndPut(key, CATALOG_CF, REF_COUNT_COL, serializedRefCnt, p)) { - return key; - } - } + byte[] key = HBaseUtils.hashStorageDescriptor(storageDescriptor, md); + byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL); + HTableInterface htab = conn.getHBaseTable(SD_TABLE); + if (serializedRefCnt == null) { + // We are the first to put it in the DB + Put p = new Put(key); + p.add(CATALOG_CF, CATALOG_COL, sd); + p.add(CATALOG_CF, REF_COUNT_COL, "1".getBytes(HBaseUtils.ENCODING)); + htab.put(p); + sdCache.put(new ByteArrayWrapper(key), storageDescriptor); + } else { + // Just increment the reference count + int refCnt = Integer.valueOf(new String(serializedRefCnt, HBaseUtils.ENCODING)) + 1; + Put p = new Put(key); + p.add(CATALOG_CF, REF_COUNT_COL, Integer.toString(refCnt).getBytes(HBaseUtils.ENCODING)); + htab.put(p); } - throw new IOException("Too many unsuccessful attepts to increment storage counter"); + htab.flushCommits(); + return key; } private static class ByteArrayWrapper { @@ -1258,7 +1342,6 @@ void updateStatistics(String dbName, String tableName, String partName, List partVals) { return partVals == null ? 
HBaseUtils.buildKey(dbName, tableName) : - buildPartitionKey(dbName, tableName, partVals); + HBaseUtils.buildPartitionKey(dbName, tableName, partVals); } private String getStatisticsTable(List partVals) { @@ -1459,24 +1542,26 @@ private void flushRoleCache() { private void store(String table, byte[] key, byte[] colFam, byte[] colName, byte[] obj) throws IOException { - HTableInterface htab = getHTable(table); + HTableInterface htab = conn.getHBaseTable(table); Put p = new Put(key); p.add(colFam, colName, obj); htab.put(p); + htab.flushCommits(); } private void store(String table, byte[] key, byte[] colFam, byte[][] colName, byte[][] obj) throws IOException { - HTableInterface htab = getHTable(table); + HTableInterface htab = conn.getHBaseTable(table); Put p = new Put(key); for (int i = 0; i < colName.length; i++) { p.add(colFam, colName[i], obj[i]); } htab.put(p); + htab.flushCommits(); } private byte[] read(String table, byte[] key, byte[] colFam, byte[] colName) throws IOException { - HTableInterface htab = getHTable(table); + HTableInterface htab = conn.getHBaseTable(table); Get g = new Get(key); g.addColumn(colFam, colName); Result res = htab.get(g); @@ -1485,7 +1570,7 @@ private void store(String table, byte[] key, byte[] colFam, byte[][] colName, by private Result read(String table, byte[] key, byte[] colFam, byte[][] colNames) throws IOException { - HTableInterface htab = getHTable(table); + HTableInterface htab = conn.getHBaseTable(table); Get g = new Get(key); for (byte[] colName : colNames) g.addColumn(colFam, colName); return htab.get(g); @@ -1495,7 +1580,7 @@ private Result read(String table, byte[] key, byte[] colFam, byte[][] colNames) // deleted. If colName is null and colFam is not, only the named family will be deleted. If // both are null the entire row will be deleted. private void delete(String table, byte[] key, byte[] colFam, byte[] colName) throws IOException { - HTableInterface htab = getHTable(table); + HTableInterface htab = conn.getHBaseTable(table); Delete d = new Delete(key); if (colName != null) d.deleteColumn(colFam, colName); else if (colFam != null) d.deleteFamily(colFam); @@ -1504,7 +1589,7 @@ private void delete(String table, byte[] key, byte[] colFam, byte[] colName) thr private Iterator scanWithFilter(String table, byte[] keyPrefix, byte[] colFam, byte[] colName, Filter filter) throws IOException { - HTableInterface htab = getHTable(table); + HTableInterface htab = conn.getHBaseTable(table); Scan s; if (keyPrefix == null) { s = new Scan(); @@ -1519,44 +1604,23 @@ private void delete(String table, byte[] key, byte[] colFam, byte[] colName) thr return scanner.iterator(); } - private HTableInterface getHTable(String table) throws IOException { - HTableInterface htab = tables.get(table); - if (htab == null) { - LOG.debug("Trying to connect to table " + table); - try { - htab = conn.getTable(table); - // Calling gettable doesn't actually connect to the region server, it's very light - // weight, so call something else so we actually reach out and touch the region server - // and see if the table is there. 
- Result r = htab.get(new Get("nosuchkey".getBytes(HBaseUtils.ENCODING))); - } catch (IOException e) { - LOG.info("Caught exception when table was missing"); - return null; - } - htab.setAutoFlushTo(false); - tables.put(table, htab); - } - return htab; - } - - private void flush() throws IOException { - for (HTableInterface htab : tables.values()) htab.flushCommits(); - } - - private byte[] hash(byte[] serialized) throws IOException { - md.update(serialized); - return md.digest(); - } - /********************************************************************************************** * Testing methods and classes *********************************************************************************************/ @VisibleForTesting int countStorageDescriptor() throws IOException { - ResultScanner scanner = getHTable(SD_TABLE).getScanner(new Scan()); + ResultScanner scanner = conn.getHBaseTable(SD_TABLE).getScanner(new Scan()); int cnt = 0; - while (scanner.next() != null) cnt++; + Result r; + do { + r = scanner.next(); + if (r != null) { + LOG.debug("Saw record with hash " + Base64.encodeBase64String(r.getRow())); + cnt++; + } + } while (r != null); + return cnt; } @@ -1565,10 +1629,11 @@ int countStorageDescriptor() throws IOException { * @param connection Mock connection objecct */ @VisibleForTesting - void setConnection(HConnection connection) { + void setConnection(HBaseConnection connection) { conn = connection; } + // For testing without the cache private static class BogusObjectCache extends ObjectCache { static Counter bogus = new Counter("bogus"); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseSchemaTool.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseSchemaTool.java index 0ab6551..f3dcd3f 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseSchemaTool.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseSchemaTool.java @@ -46,7 +46,7 @@ */ public class HBaseSchemaTool { - private static String[] commands = {"db", "part", "parts", "role", "table"}; + private static String[] commands = {"db", "part", "parts", "role", "table", "install"}; public static void main(String[] args) throws Exception { Options options = new Options(); @@ -154,6 +154,10 @@ public void db() throws IOException, TException { else dump(db); } + public void install() throws IOException { + HBaseReadWrite.createTablesIfNotExist(); + } + public void part() throws IOException, TException { if (hasStats) { Table table = hrw.getTable(dbName, tableName); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java index 1a9128b..7752cac 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java @@ -18,12 +18,12 @@ */ package org.apache.hadoop.hive.metastore.hbase; -import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStore; import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.api.AggrStats; @@ -57,16 +57,6 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import 
org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.model.MDBPrivilege; -import org.apache.hadoop.hive.metastore.model.MDatabase; -import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege; -import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege; -import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege; -import org.apache.hadoop.hive.metastore.model.MRole; -import org.apache.hadoop.hive.metastore.model.MRoleMap; -import org.apache.hadoop.hive.metastore.model.MTable; -import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege; -import org.apache.hadoop.hive.metastore.model.MTablePrivilege; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -324,9 +314,9 @@ public boolean dropPartition(String dbName, String tableName, List part_ @Override public void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException, MetaException { - // HiveMetaStore above us has already confirmed the table exists, I'm not rechecking try { - getHBase().putTable(newTable); + Table oldTable = getHBase().getTable(dbname, name); + getHBase().replaceTable(oldTable, newTable); } catch (IOException e) { LOG.error("Unable to alter table " + tableNameForErrorMsg(dbname, name), e); throw new MetaException("Unable to alter table " + tableNameForErrorMsg(dbname, name)); @@ -398,7 +388,8 @@ public void alterTable(String dbname, String name, Table newTable) throws Invali public void alterPartition(String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidObjectException, MetaException { try { - getHBase().putPartition(new_part); + Partition oldPart = getHBase().getPartition(db_name, tbl_name, part_vals); + getHBase().replacePartition(oldPart, new_part); } catch (IOException e) { LOG.error("Unable to add partition", e); throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); @@ -410,7 +401,8 @@ public void alterPartitions(String db_name, String tbl_name, List> List new_parts) throws InvalidObjectException, MetaException { try { - getHBase().putPartitions(new_parts); + List oldParts = getHBase().getPartitions(db_name, tbl_name, part_vals_list); + getHBase().replacePartitions(oldParts, new_parts); } catch (IOException e) { LOG.error("Unable to add partition", e); throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); @@ -569,9 +561,20 @@ public boolean grantRole(Role role, String userName, PrincipalType principalType throws MetaException, NoSuchObjectException, InvalidObjectException { try { Set usersToRemap = findUsersToRemapRolesFor(role, userName, principalType); - getHBase().addPrincipalToRole(role.getRoleName(), - new GrantInfoWritable(userName, principalType, (int)(System.currentTimeMillis() / 1000), - grantor, grantorType, grantOption)); + HbaseMetastoreProto.RoleGrantInfo.Builder builder = + HbaseMetastoreProto.RoleGrantInfo.newBuilder(); + if (userName != null) builder.setPrincipalName(userName); + if (principalType != null) { + builder.setPrincipalType(HBaseUtils.convertPrincipalTypes(principalType)); + } + builder.setAddTime((int)(System.currentTimeMillis() / 1000)); + if (grantor != null) builder.setGrantor(grantor); + if (grantorType != null) { + builder.setGrantorType(HBaseUtils.convertPrincipalTypes(grantorType)); + } + builder.setGrantOption(grantOption); + + 
getHBase().addPrincipalToRole(role.getRoleName(), builder.build()); for (String user : usersToRemap) { getHBase().buildRoleMapForUser(user); } @@ -1063,12 +1066,13 @@ public Role getRole(String roleName) throws NoSuchObjectException { List roles = listRoles(principalName, principalType); List rpgs = new ArrayList(roles.size()); for (Role role : roles) { - GrantInfoList grants = getHBase().getRolePrincipals(role.getRoleName()); - for (GrantInfoWritable grant : grants.grantInfos) { - if (grant.principalType.equals(principalType) && - grant.principalName.equals(principalName)) { + HbaseMetastoreProto.RoleGrantInfoList grants = getHBase().getRolePrincipals(role.getRoleName()); + for (HbaseMetastoreProto.RoleGrantInfo grant : grants.getGrantInfoList()) { + if (grant.getPrincipalType().equals(principalType) && + grant.getPrincipalName().equals(principalName)) { rpgs.add(new RolePrincipalGrant(role.getRoleName(), principalName, principalType, - grant.grantOption, grant.addTime, grant.grantor, grant.grantorType)); + grant.getGrantOption(), (int)grant.getAddTime(), grant.getGrantor(), + HBaseUtils.convertPrincipalTypes(grant.getGrantorType()))); } } } @@ -1081,11 +1085,13 @@ public Role getRole(String roleName) throws NoSuchObjectException { @Override public List listRoleMembers(String roleName) { try { - GrantInfoList gil = getHBase().getRolePrincipals(roleName); - List roleMaps = new ArrayList(gil.grantInfos.size()); - for (GrantInfoWritable giw : gil.grantInfos) { - roleMaps.add(new RolePrincipalGrant(roleName, giw.principalName, giw.principalType, - giw.grantOption, giw.addTime, giw.grantor, giw.grantorType)); + HbaseMetastoreProto.RoleGrantInfoList gil = getHBase().getRolePrincipals(roleName); + List roleMaps = new ArrayList(gil.getGrantInfoList().size()); + for (HbaseMetastoreProto.RoleGrantInfo giw : gil.getGrantInfoList()) { + roleMaps.add(new RolePrincipalGrant(roleName, giw.getPrincipalName(), + HBaseUtils.convertPrincipalTypes(giw.getPrincipalType()), + giw.getGrantOption(), (int)giw.getAddTime(), giw.getGrantor(), + HBaseUtils.convertPrincipalTypes(giw.getGrantorType()))); } return roleMaps; } catch (Exception e) { @@ -1256,7 +1262,9 @@ public boolean removeMasterKey(Integer keySeq) { @Override public void verifySchema() throws MetaException { try { - getHBase().createTablesIfNotExist(); + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST)) { + getHBase().createTablesIfNotExist(); + } } catch (IOException e) { LOG.fatal("Unable to verify schema ", e); throw new MetaException("Unable to verify schema"); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java index b7aa01c..3c7e35e 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java @@ -18,6 +18,9 @@ */ package org.apache.hadoop.hive.metastore.hbase; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.commons.codec.binary.Base64; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -26,35 +29,39 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Database; import 
org.apache.hadoop.hive.metastore.api.Decimal; import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; -import org.apache.hadoop.io.Writable; -import org.apache.thrift.TEnum; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInput; -import java.io.DataInputStream; -import java.io.DataOutput; -import java.io.DataOutputStream; +import org.apache.hadoop.hive.metastore.api.Table; + import java.io.IOException; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.util.ArrayDeque; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Deque; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.TreeSet; /** * Utility functions @@ -86,539 +93,835 @@ return protoKey.getBytes(ENCODING); } - static byte[] serialize(Writable writable) throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream dos = new DataOutputStream(baos); - writable.write(dos); - return baos.toByteArray(); + private static HbaseMetastoreProto.Parameters buildParameters(Map params) { + List entries = + new ArrayList(); + for (Map.Entry e : params.entrySet()) { + entries.add( + HbaseMetastoreProto.ParameterEntry.newBuilder() + .setKey(e.getKey()) + .setValue(e.getValue()) + .build()); + } + return HbaseMetastoreProto.Parameters.newBuilder() + .addAllParameter(entries) + .build(); } - static void deserialize(T instance, byte[] bytes) throws IOException { - DataInput in = new DataInputStream(new ByteArrayInputStream(bytes)); - instance.readFields(in); + private static Map buildParameters(HbaseMetastoreProto.Parameters protoParams) { + Map params = new HashMap(); + for (HbaseMetastoreProto.ParameterEntry pe : protoParams.getParameterList()) { + params.put(pe.getKey(), pe.getValue()); + } + return params; } - static void writeStr(DataOutput out, String str) throws IOException { - if (str == null || str.length() == 0) { - out.writeInt(0); - return; - } else { - out.writeInt(str.length()); - out.write(str.getBytes(), 0, str.length()); + + private static List + buildPrincipalPrivilegeSetEntry(Map> entries) { + List results = + new ArrayList(); + for (Map.Entry> entry : entries.entrySet()) { + results.add(HbaseMetastoreProto.PrincipalPrivilegeSetEntry.newBuilder() + .setPrincipalName(entry.getKey()) + .addAllPrivileges(buildPrivilegeGrantInfo(entry.getValue())) + .build()); + } + return results; + } + + private static List buildPrivilegeGrantInfo( + List privileges) { + List results = + new ArrayList(); + for (PrivilegeGrantInfo privilege : privileges) { + 
HbaseMetastoreProto.PrivilegeGrantInfo.Builder builder = + HbaseMetastoreProto.PrivilegeGrantInfo.newBuilder(); + if (privilege.getPrivilege() != null) builder.setPrivilege(privilege.getPrivilege()); + builder.setCreateTime(privilege.getCreateTime()); + if (privilege.getGrantor() != null) builder.setGrantor(privilege.getGrantor()); + if (privilege.getGrantorType() != null) { + builder.setGrantorType(convertPrincipalTypes(privilege.getGrantorType())); + } + builder.setGrantOption(privilege.isGrantOption()); + results.add(builder.build()); } + return results; } - static String readStr(DataInput in) throws IOException { - int len = in.readInt(); - if (len == 0) { - return new String(); - } else { - byte[] b = new byte[len]; - in.readFully(b, 0, len); - return new String(b); + /** + * Convert Thrift.PrincipalType to HbaseMetastoreProto.principalType + * @param type + * @return + */ + static HbaseMetastoreProto.PrincipalType convertPrincipalTypes(PrincipalType type) { + switch (type) { + case USER: return HbaseMetastoreProto.PrincipalType.USER; + case ROLE: return HbaseMetastoreProto.PrincipalType.ROLE; + default: throw new RuntimeException("Unknown principal type " + type.toString()); } } - static void writeByteArray(DataOutput out, byte[] b) throws IOException { - if (b == null || b.length == 0) { - out.writeInt(0); - } else { - out.writeInt(b.length); - out.write(b, 0, b.length); + /** + * Convert principalType from HbaseMetastoreProto to Thrift.PrincipalType + * @param type + * @return + */ + static PrincipalType convertPrincipalTypes(HbaseMetastoreProto.PrincipalType type) { + switch (type) { + case USER: return PrincipalType.USER; + case ROLE: return PrincipalType.ROLE; + default: throw new RuntimeException("Unknown principal type " + type.toString()); } } - static byte[] readByteArray(DataInput in) throws IOException { - int len = in.readInt(); - if (len == 0) { - return new byte[0]; - } else { - byte[] b = new byte[len]; - in.readFully(b, 0, len); - return b; + private static Map> convertPrincipalPrivilegeSetEntries( + List entries) { + Map> map = + new HashMap>(); + for (HbaseMetastoreProto.PrincipalPrivilegeSetEntry entry : entries) { + map.put(entry.getPrincipalName(), convertPrivilegeGrantInfos(entry.getPrivilegesList())); } + return map; + } + + private static List convertPrivilegeGrantInfos( + List privileges) { + List results = new ArrayList(); + for (HbaseMetastoreProto.PrivilegeGrantInfo proto : privileges) { + PrivilegeGrantInfo pgi = new PrivilegeGrantInfo(); + pgi.setPrivilege(proto.getPrivilege()); + pgi.setCreateTime((int)proto.getCreateTime()); + pgi.setGrantor(proto.getGrantor()); + pgi.setGrantorType(convertPrincipalTypes(proto.getGrantorType())); + pgi.setGrantOption(proto.getGrantOption()); + results.add(pgi); + } + return results; } - static void writeDecimal(DataOutput out, Decimal val) throws IOException { - HBaseUtils.writeByteArray(out, val.getUnscaled()); - out.writeShort(val.getScale()); + private static HbaseMetastoreProto.PrincipalPrivilegeSet + buildPrincipalPrivilegeSet(PrincipalPrivilegeSet pps) { + HbaseMetastoreProto.PrincipalPrivilegeSet.Builder builder = + HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(); + if (pps.getUserPrivileges() != null) { + builder.addAllUsers(buildPrincipalPrivilegeSetEntry(pps.getUserPrivileges())); + } + if (pps.getRolePrivileges() != null) { + builder.addAllRoles(buildPrincipalPrivilegeSetEntry(pps.getRolePrivileges())); + } + return builder.build(); } - static Decimal readDecimal(DataInput in) throws IOException { - 
Decimal d = new Decimal(); - d.setUnscaled(HBaseUtils.readByteArray(in)); - d.setScale(in.readShort()); - return d; + private static PrincipalPrivilegeSet buildPrincipalPrivilegeSet( + HbaseMetastoreProto.PrincipalPrivilegeSet proto) throws InvalidProtocolBufferException { + PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); + pps.setUserPrivileges(convertPrincipalPrivilegeSetEntries(proto.getUsersList())); + pps.setRolePrivileges(convertPrincipalPrivilegeSetEntries(proto.getRolesList())); + return pps; + } + /** + * Serialize a PrincipalPrivilegeSet + * @param pps + * @return + */ + static byte[] serializePrincipalPrivilegeSet(PrincipalPrivilegeSet pps) { + return buildPrincipalPrivilegeSet(pps).toByteArray(); } - static Map readStrStrMap(DataInput in) throws IOException { - int sz = in.readInt(); - if (sz == 0) { - return new HashMap(); - } else { - Map m = new HashMap(sz); - for (int i = 0; i < sz; i++) { - m.put(readStr(in), readStr(in)); - } - return m; - } + /** + * Deserialize a PrincipalPrivilegeSet + * @param serialized + * @return + * @throws InvalidProtocolBufferException + */ + static PrincipalPrivilegeSet deserializePrincipalPrivilegeSet(byte[] serialized) + throws InvalidProtocolBufferException { + HbaseMetastoreProto.PrincipalPrivilegeSet proto = + HbaseMetastoreProto.PrincipalPrivilegeSet.parseFrom(serialized); + return buildPrincipalPrivilegeSet(proto); } + /** + * Serialize a role + * @param role + * @return two byte arrays, first contains the key, the second the serialized value. + */ + static byte[][] serializeRole(Role role) { + byte[][] result = new byte[2][]; + result[0] = buildKey(role.getRoleName()); + HbaseMetastoreProto.Role.Builder builder = HbaseMetastoreProto.Role.newBuilder(); + builder.setCreateTime(role.getCreateTime()); + if (role.getOwnerName() != null) builder.setOwnerName(role.getOwnerName()); + result[1] = builder.build().toByteArray(); + return result; + } - static void writeStrStrMap(DataOutput out, Map map) throws IOException { - if (map == null || map.size() == 0) { - out.writeInt(0); - } else { - out.writeInt(map.size()); - for (Map.Entry e : map.entrySet()) { - writeStr(out, e.getKey()); - writeStr(out, e.getValue()); - } - } + /** + * Deserialize a role. This method should be used when the rolename is already known as it + * doesn't have to re-deserialize it. + * @param roleName name of the role + * @param value value fetched from hbase + * @return A role + * @throws InvalidProtocolBufferException + */ + static Role deserializeRole(String roleName, byte[] value) + throws InvalidProtocolBufferException { + Role role = new Role(); + role.setRoleName(roleName); + HbaseMetastoreProto.Role protoRole = + HbaseMetastoreProto.Role.parseFrom(value); + role.setCreateTime((int)protoRole.getCreateTime()); + role.setOwnerName(protoRole.getOwnerName()); + return role; } - static Map, String> readStrListStrMap(DataInput in) throws IOException { - int sz = in.readInt(); - if (sz == 0) { - return new HashMap, String>(); - } else { - Map, String> m = new HashMap, String>(sz); - for (int i = 0; i < sz; i++) { - m.put(readStrList(in), readStr(in)); - } - return m; - } + /** + * Deserialize a role. This method should be used when the rolename is not already known (eg + * when doing a scan). 
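+ * The row key is decoded back into the role name using the standard key encoding, and the
+ * name-based variant is then used to build the Role from the value.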
+ * @param key key from hbase + * @param value value from hbase + * @return a role + * @throws InvalidProtocolBufferException + */ + static Role deserializeRole(byte[] key, byte[] value) + throws InvalidProtocolBufferException { + String roleName = new String(key, ENCODING); + return deserializeRole(roleName, value); } + /** + * Serialize a list of role names + * @param roles + * @return + */ + static byte[] serializeRoleList(List roles) { + return HbaseMetastoreProto.RoleList.newBuilder() + .addAllRole(roles) + .build() + .toByteArray(); + } - static void writeStrListStrMap(DataOutput out, Map, String> map) throws IOException { - if (map == null || map.size() == 0) { - out.writeInt(0); - } else { - out.writeInt(map.size()); - for (Map.Entry, String> e : map.entrySet()) { - writeStrList(out, e.getKey()); - writeStr(out, e.getValue()); - } - } + static List deserializeRoleList(byte[] value) throws InvalidProtocolBufferException { + HbaseMetastoreProto.RoleList proto = HbaseMetastoreProto.RoleList.parseFrom(value); + return new ArrayList(proto.getRoleList()); } - static void writeStrList(DataOutput out, List list) throws IOException { - if (list == null || list.size() == 0) { - out.writeInt(0); - } else { - out.writeInt(list.size()); - for (String val : list) { - writeStr(out, val); - } + /** + * Serialize a database + * @param db + * @return two byte arrays, first contains the key, the second the serialized value. + */ + static byte[][] serializeDatabase(Database db) { + byte[][] result = new byte[2][]; + result[0] = buildKey(db.getName()); + HbaseMetastoreProto.Database.Builder builder = HbaseMetastoreProto.Database.newBuilder(); + + if (db.getDescription() != null) builder.setDescription(db.getDescription()); + if (db.getLocationUri() != null) builder.setUri(db.getLocationUri()); + if (db.getParameters() != null) builder.setParameters(buildParameters(db.getParameters())); + if (db.getPrivileges() != null) { + builder.setPrivileges(buildPrincipalPrivilegeSet(db.getPrivileges())); } + if (db.getOwnerName() != null) builder.setOwnerName(db.getOwnerName()); + if (db.getOwnerType() != null) builder.setOwnerType(convertPrincipalTypes(db.getOwnerType())); + + result[1] = builder.build().toByteArray(); + return result; } - static List readStrList(DataInput in) throws IOException { - int sz = in.readInt(); - if (sz == 0) { - return new ArrayList(); - } else { - List list = new ArrayList(sz); - for (int i = 0; i < sz; i++) { - list.add(readStr(in)); - } - return list; - } + /** + * Deserialize a database. This method should be used when the db anme is already known as it + * doesn't have to re-deserialize it. 
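+ * Only the value needs parsing; the database name lives in the row key, while the serialized
+ * value carries the description, location URI, parameters, privileges, owner name and owner type.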
+ * @param dbName name of the role + * @param value value fetched from hbase + * @return A database + * @throws InvalidProtocolBufferException + */ + static Database deserializeDatabase(String dbName, byte[] value) + throws InvalidProtocolBufferException { + Database db = new Database(); + db.setName(dbName); + HbaseMetastoreProto.Database protoDb = HbaseMetastoreProto.Database.parseFrom(value); + db.setName(dbName); + db.setDescription(protoDb.getDescription()); + db.setLocationUri(protoDb.getUri()); + db.setParameters(buildParameters(protoDb.getParameters())); + db.setPrivileges(buildPrincipalPrivilegeSet(protoDb.getPrivileges())); + db.setOwnerName(protoDb.getOwnerName()); + db.setOwnerType(convertPrincipalTypes(protoDb.getOwnerType())); + + return db; } - static void writeWritableList(DataOutput out, List list) throws IOException { - if (list == null || list.size() == 0) { - out.writeInt(0); - } else { - out.writeInt(list.size()); - for (Writable val : list) { - val.write(out); - } - } + /** + * Deserialize a database. This method should be used when the db name is not already known (eg + * when doing a scan). + * @param key key from hbase + * @param value value from hbase + * @return a role + * @throws InvalidProtocolBufferException + */ + static Database deserializeDatabase(byte[] key, byte[] value) + throws InvalidProtocolBufferException { + String dbName = new String(key, ENCODING); + return deserializeDatabase(dbName, value); } - static List readWritableList(DataInput in, Class clazz) - throws IOException { - int sz = in.readInt(); - if (sz == 0) { - return new ArrayList(); - } else { - List list = new ArrayList(sz); - for (int i = 0; i < sz; i++) { - try { - T instance = clazz.newInstance(); - instance.readFields(in); - list.add(instance); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - return list; + private static List + convertFieldSchemaListFromProto(List protoList) { + List schemas = new ArrayList(protoList.size()); + for (HbaseMetastoreProto.FieldSchema proto : protoList) { + schemas.add(new FieldSchema(proto.getName(), proto.getType(), proto.getComment())); } + return schemas; + } + + private static List + convertFieldSchemaListToProto(List schemas) { + List protoList = + new ArrayList(schemas.size()); + for (FieldSchema fs : schemas) { + HbaseMetastoreProto.FieldSchema.Builder builder = + HbaseMetastoreProto.FieldSchema.newBuilder(); + builder + .setName(fs.getName()) + .setType(fs.getType()); + if (fs.getComment() != null) builder.setComment(fs.getComment()); + protoList.add(builder.build()); + } + return protoList; } - static void writeStrListList(DataOutput out, List> list) throws IOException { - if (list == null || list.size() == 0) { - out.writeInt(0); - } else { - out.writeInt(list.size()); - for (List vals : list) { - writeStrList(out, vals); + /** + * Serialize a storage descriptor. + * @param sd storage descriptor to serialize + * @return serialized storage descriptor. 
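+ * Note that the location and the storage descriptor parameters are not serialized here; they are
+ * stored with the containing table or partition (see serializeTable and serializePartition), so
+ * that only the shared portion of the descriptor is stored once per hash. For example, the write
+ * path pairs this value with its hash key roughly as follows (md being the shared MessageDigest):
+ * <pre>
+ *   byte[] value = HBaseUtils.serializeStorageDescriptor(sd);
+ *   byte[] key = HBaseUtils.hashStorageDescriptor(sd, md);
+ * </pre>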
+ */ + static byte[] serializeStorageDescriptor(StorageDescriptor sd) { + HbaseMetastoreProto.StorageDescriptor.Builder builder = + HbaseMetastoreProto.StorageDescriptor.newBuilder(); + builder.addAllCols(convertFieldSchemaListToProto(sd.getCols())); + if (sd.getInputFormat() != null) { + builder.setInputFormat(sd.getInputFormat()); + } + if (sd.getOutputFormat() != null) { + builder.setOutputFormat(sd.getOutputFormat()); + } + builder.setIsCompressed(sd.isCompressed()); + builder.setNumBuckets(sd.getNumBuckets()); + if (sd.getSerdeInfo() != null) { + HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder serdeBuilder = + HbaseMetastoreProto.StorageDescriptor.SerDeInfo.newBuilder(); + SerDeInfo serde = sd.getSerdeInfo(); + if (serde.getName() != null) { + serdeBuilder.setName(serde.getName()); + } + if (serde.getSerializationLib() != null) { + serdeBuilder.setSerializationLib(serde.getSerializationLib()); + } + if (serde.getParameters() != null) { + serdeBuilder.setParameters(buildParameters(serde.getParameters())); } + builder.setSerdeInfo(serdeBuilder); } - } - - static List> readStrListList(DataInput in) throws IOException { - int sz = in.readInt(); - if (sz == 0) { - return new ArrayList>(); - } else { - List> list = new ArrayList>(sz); - for (int i = 0; i < sz; i++) { - list.add(readStrList(in)); + if (sd.getBucketCols() != null) { + builder.addAllBucketCols(sd.getBucketCols()); + } + if (sd.getSortCols() != null) { + List orders = sd.getSortCols(); + List protoList = + new ArrayList(orders.size()); + for (Order order : orders) { + protoList.add(HbaseMetastoreProto.StorageDescriptor.Order.newBuilder() + .setColumnName(order.getCol()) + .setOrder(order.getOrder()) + .build()); } - return list; + builder.addAllSortCols(protoList); } - } - static List readFieldSchemaList(DataInput in) throws IOException { - int sz = in.readInt(); - if (sz == 0) { - return new ArrayList(); - } else { - List schemas = new ArrayList(sz); - for (int i = 0; i < sz; i++) { - schemas.add(new FieldSchema(readStr(in), readStr(in), readStr(in))); + if (sd.getSkewedInfo() != null) { + HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder skewBuilder = + HbaseMetastoreProto.StorageDescriptor.SkewedInfo.newBuilder(); + SkewedInfo skewed = sd.getSkewedInfo(); + if (skewed.getSkewedColNames() != null) { + skewBuilder.addAllSkewedColNames(skewed.getSkewedColNames()); } - return schemas; + if (skewed.getSkewedColValues() != null) { + for (List innerList : skewed.getSkewedColValues()) { + HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder listBuilder = + HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.newBuilder(); + listBuilder.addAllSkewedColValue(innerList); + skewBuilder.addSkewedColValues(listBuilder); + } + } + if (skewed.getSkewedColValueLocationMaps() != null) { + for (Map.Entry, String> e : skewed.getSkewedColValueLocationMaps().entrySet()) { + HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder mapBuilder = + HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.newBuilder(); + mapBuilder.addAllKey(e.getKey()); + mapBuilder.setValue(e.getValue()); + skewBuilder.addSkewedColValueLocationMaps(mapBuilder); + } + } + builder.setSkewedInfo(skewBuilder); } + builder.setStoredAsSubDirectories(sd.isStoredAsSubDirectories()); + + return builder.build().toByteArray(); } - static void writeFieldSchemaList(DataOutput out, List fields) throws IOException { - if (fields == null || fields.size() == 0) { - out.writeInt(0); - } 
else { - out.writeInt(fields.size()); - for (FieldSchema field : fields) { - writeStr(out, field.getName()); - writeStr(out, field.getType()); - writeStr(out, field.getComment()); + /** + * Produce a hash for the storage descriptor + * @param sd storage descriptor to hash + * @param md message descriptor to use to generate the hash + * @return the hash as a byte array + */ + static byte[] hashStorageDescriptor(StorageDescriptor sd, MessageDigest md) { + // Note all maps and lists have to be absolutely sorted. Otherwise we'll produce different + // results for hashes based on the OS or JVM being used. + md.reset(); + for (FieldSchema fs : sd.getCols()) { + md.update(fs.getName().getBytes(ENCODING)); + md.update(fs.getType().getBytes(ENCODING)); + if (fs.getComment() != null) md.update(fs.getComment().getBytes(ENCODING)); + } + if (sd.getInputFormat() != null) { + md.update(sd.getInputFormat().getBytes(ENCODING)); + } + if (sd.getOutputFormat() != null) { + md.update(sd.getOutputFormat().getBytes(ENCODING)); + } + md.update(sd.isCompressed() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); + md.update(Integer.toString(sd.getNumBuckets()).getBytes(ENCODING)); + if (sd.getSerdeInfo() != null) { + SerDeInfo serde = sd.getSerdeInfo(); + if (serde.getName() != null) { + md.update(serde.getName().getBytes(ENCODING)); + } + if (serde.getSerializationLib() != null) { + md.update(serde.getSerializationLib().getBytes(ENCODING)); + } + if (serde.getParameters() != null) { + SortedMap params = new TreeMap(serde.getParameters()); + for (Map.Entry param : params.entrySet()) { + md.update(param.getKey().getBytes(ENCODING)); + md.update(param.getValue().getBytes(ENCODING)); + } } } - } - - static List readOrderList(DataInput in) throws IOException { - int sz = in.readInt(); - if (sz == 0) { - return new ArrayList(); - } else { - List orderList = new ArrayList(sz); - for (int i = 0; i < sz; i++) { - orderList.add(new Order(readStr(in), in.readInt())); + if (sd.getBucketCols() != null) { + SortedSet bucketCols = new TreeSet(sd.getBucketCols()); + for (String bucket : bucketCols) md.update(bucket.getBytes(ENCODING)); + } + if (sd.getSortCols() != null) { + SortedSet orders = new TreeSet(sd.getSortCols()); + for (Order order : orders) { + md.update(order.getCol().getBytes(ENCODING)); + md.update(Integer.toString(order.getOrder()).getBytes(ENCODING)); } - return orderList; } - } - - static void writeOrderList(DataOutput out, List orderList) throws IOException { - if (orderList == null || orderList.size() == 0) { - out.writeInt(0); - } else { - out.writeInt(orderList.size()); - for (Order order : orderList) { - writeStr(out, order.getCol()); - out.writeInt(order.getOrder()); + if (sd.getSkewedInfo() != null) { + SkewedInfo skewed = sd.getSkewedInfo(); + if (skewed.getSkewedColNames() != null) { + SortedSet colnames = new TreeSet(skewed.getSkewedColNames()); + for (String colname : colnames) md.update(colname.getBytes(ENCODING)); + } + if (skewed.getSkewedColValues() != null) { + SortedSet sortedOuterList = new TreeSet(); + for (List innerList : skewed.getSkewedColValues()) { + SortedSet sortedInnerList = new TreeSet(innerList); + sortedOuterList.add(StringUtils.join(sortedInnerList, ".")); + } + for (String colval : sortedOuterList) md.update(colval.getBytes(ENCODING)); + } + if (skewed.getSkewedColValueLocationMaps() != null) { + SortedMap sortedMap = new TreeMap(); + for (Map.Entry, String> smap : skewed.getSkewedColValueLocationMaps().entrySet()) { + SortedSet sortedKey = new TreeSet(smap.getKey()); 
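+ // Join the sorted key values so that identical location maps hash identically no matter how
+ // the key lists were originally ordered.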
+ sortedMap.put(StringUtils.join(sortedKey, "."), smap.getValue()); + } + for (Map.Entry e : sortedMap.entrySet()) { + md.update(e.getKey().getBytes(ENCODING)); + md.update(e.getValue().getBytes(ENCODING)); + } } } - } - static PrincipalPrivilegeSet readPrivileges(byte[] bytes) throws IOException { - DataInput in = new DataInputStream(new ByteArrayInputStream(bytes)); - return readPrivileges(in); - } - - static PrincipalPrivilegeSet readPrivileges(DataInput in) throws IOException { - if (in.readBoolean()) { - PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); - pps.setUserPrivileges(readPrivilege(in)); - pps.setRolePrivileges(readPrivilege(in)); - // we ignore group privileges because we don't support old auth - return pps; - } else { - return new PrincipalPrivilegeSet(); + return md.digest(); + } + + static StorageDescriptor deserializeStorageDescriptor(byte[] serialized) + throws InvalidProtocolBufferException { + HbaseMetastoreProto.StorageDescriptor proto = + HbaseMetastoreProto.StorageDescriptor.parseFrom(serialized); + StorageDescriptor sd = new StorageDescriptor(); + sd.setCols(convertFieldSchemaListFromProto(proto.getColsList())); + sd.setInputFormat(proto.getInputFormat()); + sd.setOutputFormat(proto.getOutputFormat()); + sd.setCompressed(proto.getIsCompressed()); + sd.setNumBuckets(proto.getNumBuckets()); + SerDeInfo serde = new SerDeInfo(); + serde.setName(proto.getSerdeInfo().getName()); + serde.setSerializationLib(proto.getSerdeInfo().getSerializationLib()); + serde.setParameters(buildParameters(proto.getSerdeInfo().getParameters())); + sd.setSerdeInfo(serde); + sd.setBucketCols(new ArrayList(proto.getBucketColsList())); + List sortCols = new ArrayList(); + for (HbaseMetastoreProto.StorageDescriptor.Order protoOrder : proto.getSortColsList()) { + sortCols.add(new Order(protoOrder.getColumnName(), protoOrder.getOrder())); } - + sd.setSortCols(sortCols); + SkewedInfo skewed = new SkewedInfo(); + skewed.setSkewedColNames(new ArrayList(proto.getSkewedInfo().getSkewedColNamesList())); + for (HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList innerList : + proto.getSkewedInfo().getSkewedColValuesList()) { + skewed.addToSkewedColValues(new ArrayList(innerList.getSkewedColValueList())); + } + Map, String> colMaps = new HashMap, String>(); + for (HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap map : + proto.getSkewedInfo().getSkewedColValueLocationMapsList()) { + colMaps.put(new ArrayList(map.getKeyList()), map.getValue()); + } + skewed.setSkewedColValueLocationMaps(colMaps); + sd.setSkewedInfo(skewed); + sd.setStoredAsSubDirectories(proto.getStoredAsSubDirectories()); + return sd; } - private static Map> readPrivilege(DataInput in) - throws IOException { - int sz = in.readInt(); - if (sz == 0) { - return new HashMap>(); + /** + * Serialize a partition + * @param part partition object + * @param sdHash hash that is being used as a key for the enclosed storage descriptor + * @return First element is the key, second is the serialized partition + */ + static byte[][] serializePartition(Partition part, byte[] sdHash) { + byte[][] result = new byte[2][]; + result[0] = buildPartitionKey(part.getDbName(), part.getTableName(), part.getValues()); + HbaseMetastoreProto.Partition.Builder builder = HbaseMetastoreProto.Partition.newBuilder(); + builder + .setCreateTime(part.getCreateTime()) + .setLastAccessTime(part.getLastAccessTime()); + if (part.getSd().getLocation() != null) builder.setLocation(part.getSd().getLocation()); + if 
(part.getSd().getParameters() != null) { + builder.setSdParameters(buildParameters(part.getSd().getParameters())); + } + builder.setSdHash(ByteString.copyFrom(sdHash)); + if (part.getParameters() != null) builder.setParameters(buildParameters(part.getParameters())); + result[1] = builder.build().toByteArray(); + return result; + } + + static byte[] buildPartitionKey(String dbName, String tableName, List partVals) { + Deque keyParts = new ArrayDeque(partVals); + keyParts.addFirst(tableName); + keyParts.addFirst(dbName); + return buildKey(keyParts.toArray(new String[keyParts.size()])); + } + + static class StorageDescriptorParts { + byte[] sdHash; + String location; + Map parameters; + Partition containingPartition; + Table containingTable; + } + + static void assembleStorageDescriptor(StorageDescriptor sd, StorageDescriptorParts parts) { + sd.setLocation(parts.location); + sd.setParameters(parts.parameters); + if (parts.containingPartition != null) { + parts.containingPartition.setSd(sd); + } else if (parts.containingTable != null) { + parts.containingTable.setSd(sd); } else { - Map> priv = - new HashMap>(sz); - for (int i = 0; i < sz; i++) { - String key = readStr(in); - int numGrants = in.readInt(); - List grants = new ArrayList(numGrants); - priv.put(key, grants); - for (int j = 0; j < numGrants; j++) { - PrivilegeGrantInfo pgi = new PrivilegeGrantInfo(); - pgi.setPrivilege(readStr(in)); - pgi.setCreateTime(in.readInt()); - pgi.setGrantor(readStr(in)); - pgi.setGrantorType(PrincipalType.findByValue(in.readInt())); - pgi.setGrantOption(in.readBoolean()); - grants.add(pgi); - } - } - return priv; + throw new RuntimeException("Need either a partition or a table"); } } - static byte[] writePrivileges(PrincipalPrivilegeSet privSet) throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream dos = new DataOutputStream(baos); - writePrivileges(dos, privSet); - return baos.toByteArray(); + /** + * Deserialize a partition. This version should be used when the partition key is not already + * known (eg a scan). + * @param key the key fetched from HBase + * @param serialized the value fetched from HBase + * @return A struct that contains the partition plus parts of the storage descriptor + */ + static StorageDescriptorParts deserializePartition(byte[] key, byte[] serialized) + throws InvalidProtocolBufferException { + String[] keys = deserializeKey(key); + return deserializePartition(keys[0], keys[1], + Arrays.asList(Arrays.copyOfRange(keys, 2, keys.length)), serialized); } - static void writePrivileges(DataOutput out, PrincipalPrivilegeSet privs) throws IOException { - if (privs == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - writePrivilege(out, privs.getUserPrivileges()); - writePrivilege(out, privs.getRolePrivileges()); - // we ignore group privileges because we don't support old auth - } + /** + * Deserialize a partition. This version should be used when the partition key is + * known (eg a get). 
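+ * The shared storage descriptor is not reconstructed here; only its hash, location and
+ * parameters are returned, and the caller is expected to fetch the descriptor by hash and
+ * attach it with assembleStorageDescriptor.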
+ * @param dbName database name + * @param tableName table name + * @param partVals partition values + * @param serialized the value fetched from HBase + * @return A struct that contains the partition plus parts of the storage descriptor + */ + static StorageDescriptorParts deserializePartition(String dbName, String tableName, + List partVals, byte[] serialized) + throws InvalidProtocolBufferException { + HbaseMetastoreProto.Partition proto = HbaseMetastoreProto.Partition.parseFrom(serialized); + Partition part = new Partition(); + StorageDescriptorParts sdParts = new StorageDescriptorParts(); + sdParts.containingPartition = part; + part.setDbName(dbName); + part.setTableName(tableName); + part.setValues(partVals); + part.setCreateTime((int)proto.getCreateTime()); + part.setLastAccessTime((int)proto.getLastAccessTime()); + sdParts.location = proto.getLocation(); + sdParts.parameters = buildParameters(proto.getSdParameters()); + sdParts.sdHash = proto.getSdHash().toByteArray(); + part.setParameters(buildParameters(proto.getParameters())); + return sdParts; + } + + private static String[] deserializeKey(byte[] key) { + String k = new String(key, ENCODING); + return k.split(":"); } - private static void writePrivilege(DataOutput out, Map> priv) - throws IOException { - if (priv == null || priv.size() == 0) { - out.writeInt(0); - } else { - out.writeInt(priv.size()); - for (Map.Entry> e : priv.entrySet()) { - writeStr(out, e.getKey()); - List grants = e.getValue(); - if (grants == null || grants.size() == 0) { - out.writeInt(0); - } else { - out.writeInt(grants.size()); - for (PrivilegeGrantInfo grant : grants) { - writeStr(out, grant.getPrivilege()); - out.writeInt(grant.getCreateTime()); - writeStr(out, grant.getGrantor()); - out.writeInt(grant.getGrantorType().getValue()); - out.writeBoolean(grant.isGrantOption()); - } - } - } + /** + * Serialize a table + * @param table table object + * @param sdHash hash that is being used as a key for the enclosed storage descriptor + * @return First element is the key, second is the serialized table + */ + static byte[][] serializeTable(Table table, byte[] sdHash) { + byte[][] result = new byte[2][]; + result[0] = buildKey(table.getDbName(), table.getTableName()); + HbaseMetastoreProto.Table.Builder builder = HbaseMetastoreProto.Table.newBuilder(); + if (table.getOwner() != null) builder.setOwner(table.getOwner()); + builder + .setCreateTime(table.getCreateTime()) + .setLastAccessTime(table.getLastAccessTime()) + .setRetention(table.getRetention()); + if (table.getSd().getLocation() != null) builder.setLocation(table.getSd().getLocation()); + if (table.getSd().getParameters() != null) { + builder.setSdParameters(buildParameters(table.getSd().getParameters())); } - } - - static void writeEnum(DataOutput out, TEnum pt) throws IOException { - if (pt == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeInt(pt.getValue()); + builder.setSdHash(ByteString.copyFrom(sdHash)); + if (table.getPartitionKeys() != null) { + builder.addAllPartitionKeys(convertFieldSchemaListToProto(table.getPartitionKeys())); } + if (table.getParameters() != null) { + builder.setParameters(buildParameters(table.getParameters())); + } + if (table.getViewOriginalText() != null) { + builder.setViewOriginalText(table.getViewOriginalText()); + } + if (table.getViewExpandedText() != null) { + builder.setViewExpandedText(table.getViewExpandedText()); + } + if (table.getTableType() != null) builder.setTableType(table.getTableType()); + if 
(table.getPrivileges() != null) { + builder.setPrivileges(buildPrincipalPrivilegeSet(table.getPrivileges())); + } + builder.setIsTemporary(table.isTemporary()); + result[1] = builder.build().toByteArray(); + return result; } - static PrincipalType readPrincipalType(DataInput in) throws IOException { - return (in.readBoolean()) ? PrincipalType.findByValue(in.readInt()) : null; - } - - static void writeSkewedInfo(DataOutput out, SkewedInfo skew) throws IOException { - if (skew == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - writeStrList(out, skew.getSkewedColNames()); - writeStrListList(out, skew.getSkewedColValues()); - writeStrListStrMap(out, skew.getSkewedColValueLocationMaps()); - } + /** + * Deserialize a table. This version should be used when the table key is not already + * known (eg a scan). + * @param key the key fetched from HBase + * @param serialized the value fetched from HBase + * @return A struct that contains the table plus parts of the storage descriptor + */ + static StorageDescriptorParts deserializeTable(byte[] key, byte[] serialized) + throws InvalidProtocolBufferException { + String[] keys = deserializeKey(key); + return deserializeTable(keys[0], keys[1], serialized); } - static SkewedInfo readSkewedInfo(DataInput in) throws IOException { - if (in.readBoolean()) { - SkewedInfo skew = new SkewedInfo(); - skew.setSkewedColNames(readStrList(in)); - skew.setSkewedColValues(readStrListList(in)); - skew.setSkewedColValueLocationMaps(readStrListStrMap(in)); - return skew; - } else { - return new SkewedInfo(new ArrayList(), new ArrayList>(), - new HashMap, String>()); - } - } - - static byte[] serializeStorageDescriptor(StorageDescriptor sd) throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream dos = new DataOutputStream(baos); - writeFieldSchemaList(dos, sd.getCols()); - writeStr(dos, sd.getInputFormat()); - writeStr(dos, sd.getOutputFormat()); - dos.writeBoolean(sd.isCompressed()); - dos.writeInt(sd.getNumBuckets()); - writeStr(dos, sd.getSerdeInfo().getName()); - writeStr(dos, sd.getSerdeInfo().getSerializationLib()); - writeStrStrMap(dos, sd.getSerdeInfo().getParameters()); - writeStrList(dos, sd.getBucketCols()); - writeOrderList(dos, sd.getSortCols()); - writeSkewedInfo(dos, sd.getSkewedInfo()); - dos.writeBoolean(sd.isStoredAsSubDirectories()); - return baos.toByteArray(); - } - - static void deserializeStorageDescriptor(StorageDescriptor sd, byte[] bytes) - throws IOException { - DataInput in = new DataInputStream(new ByteArrayInputStream(bytes)); - sd.setCols(readFieldSchemaList(in)); - sd.setInputFormat(readStr(in)); - sd.setOutputFormat(readStr(in)); - sd.setCompressed(in.readBoolean()); - sd.setNumBuckets(in.readInt()); - SerDeInfo serde = new SerDeInfo(readStr(in), readStr(in), readStrStrMap(in)); - sd.setSerdeInfo(serde); - sd.setBucketCols(readStrList(in)); - sd.setSortCols(readOrderList(in)); - sd.setSkewedInfo(readSkewedInfo(in)); - sd.setStoredAsSubDirectories(in.readBoolean()); + /** + * Deserialize a table. This version should be used when the table key is + * known (eg a get). 
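+ * As with partitions, only the hash, location and parameters of the storage descriptor are
+ * returned; the caller fetches the shared descriptor and attaches it with assembleStorageDescriptor.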
+ * @param dbName database name + * @param tableName table name + * @param serialized the value fetched from HBase + * @return A struct that contains the partition plus parts of the storage descriptor + */ + static StorageDescriptorParts deserializeTable(String dbName, String tableName, + byte[] serialized) + throws InvalidProtocolBufferException { + HbaseMetastoreProto.Table proto = HbaseMetastoreProto.Table.parseFrom(serialized); + Table table = new Table(); + StorageDescriptorParts sdParts = new StorageDescriptorParts(); + sdParts.containingTable = table; + table.setDbName(dbName); + table.setTableName(tableName); + table.setOwner(proto.getOwner()); + table.setCreateTime((int)proto.getCreateTime()); + table.setLastAccessTime((int)proto.getLastAccessTime()); + table.setRetention((int)proto.getRetention()); + sdParts.location = proto.getLocation(); + sdParts.parameters = buildParameters(proto.getSdParameters()); + sdParts.sdHash = proto.getSdHash().toByteArray(); + table.setPartitionKeys(convertFieldSchemaListFromProto(proto.getPartitionKeysList())); + table.setParameters(buildParameters(proto.getParameters())); + table.setViewOriginalText(proto.getViewOriginalText()); + table.setViewExpandedText(proto.getViewExpandedText()); + table.setTableType(proto.getTableType()); + table.setPrivileges(buildPrincipalPrivilegeSet(proto.getPrivileges())); + table.setTemporary(proto.getIsTemporary()); + return sdParts; } static byte[] serializeStatsForOneColumn(ColumnStatistics stats, ColumnStatisticsObj obj) throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream dos = new DataOutputStream(baos); - dos.writeLong(stats.getStatsDesc().getLastAnalyzed()); - HBaseUtils.writeStr(dos, obj.getColType()); + HbaseMetastoreProto.ColumnStats.Builder builder = HbaseMetastoreProto.ColumnStats.newBuilder(); + builder.setLastAnalyzed(stats.getStatsDesc().getLastAnalyzed()); + if (obj.getColType() == null) { + throw new RuntimeException("Column type must be set"); + } + builder.setColumnType(obj.getColType()); ColumnStatisticsData colData = obj.getStatsData(); - HBaseUtils.writeStr(dos, colData.getSetField().toString()); switch (colData.getSetField()) { case BOOLEAN_STATS: BooleanColumnStatsData boolData = colData.getBooleanStats(); - dos.writeLong(boolData.getNumTrues()); - dos.writeLong(boolData.getNumFalses()); - dos.writeLong(boolData.getNumNulls()); + builder.setNumNulls(boolData.getNumNulls()); + builder.setBoolStats( + HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder() + .setNumTrues(boolData.getNumTrues()) + .setNumFalses(boolData.getNumFalses()) + .build()); break; case LONG_STATS: LongColumnStatsData longData = colData.getLongStats(); - dos.writeLong(longData.getLowValue()); - dos.writeLong(longData.getHighValue()); - dos.writeLong(longData.getNumNulls()); - dos.writeLong(longData.getNumDVs()); + builder.setNumNulls(longData.getNumNulls()); + builder.setNumDistinctValues(longData.getNumDVs()); + builder.setLongStats( + HbaseMetastoreProto.ColumnStats.LongStats.newBuilder() + .setLowValue(longData.getLowValue()) + .setHighValue(longData.getHighValue()) + .build()); break; case DOUBLE_STATS: DoubleColumnStatsData doubleData = colData.getDoubleStats(); - dos.writeDouble(doubleData.getLowValue()); - dos.writeDouble(doubleData.getHighValue()); - dos.writeLong(doubleData.getNumNulls()); - dos.writeLong(doubleData.getNumDVs()); + builder.setNumNulls(doubleData.getNumNulls()); + builder.setNumDistinctValues(doubleData.getNumDVs()); + builder.setDoubleStats( + 
HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder() + .setLowValue(doubleData.getLowValue()) + .setHighValue(doubleData.getHighValue()) + .build()); break; case STRING_STATS: StringColumnStatsData stringData = colData.getStringStats(); - dos.writeLong(stringData.getMaxColLen()); - dos.writeDouble(stringData.getAvgColLen()); - dos.writeLong(stringData.getNumNulls()); - dos.writeLong(stringData.getNumDVs()); + builder.setNumNulls(stringData.getNumNulls()); + builder.setNumDistinctValues(stringData.getNumDVs()); + builder.setStringStats( + HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() + .setMaxColLength(stringData.getMaxColLen()) + .setAvgColLength(stringData.getAvgColLen()) + .build()); break; case BINARY_STATS: BinaryColumnStatsData binaryData = colData.getBinaryStats(); - dos.writeLong(binaryData.getMaxColLen()); - dos.writeDouble(binaryData.getAvgColLen()); - dos.writeLong(binaryData.getNumNulls()); + builder.setNumNulls(binaryData.getNumNulls()); + builder.setBinaryStats( + HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() + .setMaxColLength(binaryData.getMaxColLen()) + .setAvgColLength(binaryData.getAvgColLen()) + .build()); break; case DECIMAL_STATS: DecimalColumnStatsData decimalData = colData.getDecimalStats(); - writeDecimal(dos, decimalData.getHighValue()); - writeDecimal(dos, decimalData.getLowValue()); - dos.writeLong(decimalData.getNumNulls()); - dos.writeLong(decimalData.getNumDVs()); + builder.setNumNulls(decimalData.getNumNulls()); + builder.setNumDistinctValues(decimalData.getNumDVs()); + builder.setDecimalStats( + HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder() + .setLowValue( + HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() + .setUnscaled(ByteString.copyFrom(decimalData.getLowValue().getUnscaled())) + .setScale(decimalData.getLowValue().getScale()) + .build()) + .setHighValue( + HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() + .setUnscaled(ByteString.copyFrom(decimalData.getHighValue().getUnscaled())) + .setScale(decimalData.getHighValue().getScale()) + .build())) + .build(); break; default: throw new RuntimeException("Woh, bad. 
Unknown stats type!"); } - return baos.toByteArray(); + return builder.build().toByteArray(); } static ColumnStatisticsObj deserializeStatsForOneColumn(ColumnStatistics stats, byte[] bytes) throws IOException { - DataInput in = new DataInputStream(new ByteArrayInputStream(bytes)); - ColumnStatisticsObj obj = new ColumnStatisticsObj(); - long lastAnalyzed = in.readLong(); + HbaseMetastoreProto.ColumnStats proto = HbaseMetastoreProto.ColumnStats.parseFrom(bytes); + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + long lastAnalyzed = proto.getLastAnalyzed(); stats.getStatsDesc().setLastAnalyzed( Math.max(lastAnalyzed, stats.getStatsDesc().getLastAnalyzed())); - obj.setColType(HBaseUtils.readStr(in)); + obj.setColType(proto.getColumnType()); - ColumnStatisticsData._Fields type = ColumnStatisticsData._Fields.valueOf(HBaseUtils.readStr (in)); ColumnStatisticsData colData = new ColumnStatisticsData(); - switch (type) { - case BOOLEAN_STATS: - BooleanColumnStatsData boolData = new BooleanColumnStatsData(); - boolData.setNumTrues(in.readLong()); - boolData.setNumFalses(in.readLong()); - boolData.setNumNulls(in.readLong()); - colData.setBooleanStats(boolData); - break; - - case LONG_STATS: - LongColumnStatsData longData = new LongColumnStatsData(); - longData.setLowValue(in.readLong()); - longData.setHighValue(in.readLong()); - longData.setNumNulls(in.readLong()); - longData.setNumDVs(in.readLong()); - colData.setLongStats(longData); - break; - - case DOUBLE_STATS: - DoubleColumnStatsData doubleData = new DoubleColumnStatsData(); - doubleData.setLowValue(in.readDouble()); - doubleData.setHighValue(in.readDouble()); - doubleData.setNumNulls(in.readLong()); - doubleData.setNumDVs(in.readLong()); - colData.setDoubleStats(doubleData); - break; - - case STRING_STATS: - StringColumnStatsData stringData = new StringColumnStatsData(); - stringData.setMaxColLen(in.readLong()); - stringData.setAvgColLen(in.readDouble()); - stringData.setNumNulls(in.readLong()); - stringData.setNumDVs(in.readLong()); - colData.setStringStats(stringData); - break; - - case BINARY_STATS: - BinaryColumnStatsData binaryData = new BinaryColumnStatsData(); - binaryData.setMaxColLen(in.readLong()); - binaryData.setAvgColLen(in.readDouble()); - binaryData.setNumNulls(in.readLong()); - colData.setBinaryStats(binaryData); - break; - - case DECIMAL_STATS: - DecimalColumnStatsData decimalData = new DecimalColumnStatsData(); - decimalData.setHighValue(readDecimal(in)); - decimalData.setLowValue(readDecimal(in)); - decimalData.setNumNulls(in.readLong()); - decimalData.setNumDVs(in.readLong()); - colData.setDecimalStats(decimalData); - break; - - default: - throw new RuntimeException("Woh, bad. 
Unknown stats type!"); + if (proto.hasBoolStats()) { + BooleanColumnStatsData boolData = new BooleanColumnStatsData(); + boolData.setNumTrues(proto.getBoolStats().getNumTrues()); + boolData.setNumFalses(proto.getBoolStats().getNumFalses()); + boolData.setNumNulls(proto.getNumNulls()); + colData.setBooleanStats(boolData); + } else if (proto.hasLongStats()) { + LongColumnStatsData longData = new LongColumnStatsData(); + longData.setLowValue(proto.getLongStats().getLowValue()); + longData.setHighValue(proto.getLongStats().getHighValue()); + longData.setNumNulls(proto.getNumNulls()); + longData.setNumDVs(proto.getNumDistinctValues()); + colData.setLongStats(longData); + } else if (proto.hasDoubleStats()) { + DoubleColumnStatsData doubleData = new DoubleColumnStatsData(); + doubleData.setLowValue(proto.getDoubleStats().getLowValue()); + doubleData.setHighValue(proto.getDoubleStats().getHighValue()); + doubleData.setNumNulls(proto.getNumNulls()); + doubleData.setNumDVs(proto.getNumDistinctValues()); + colData.setDoubleStats(doubleData); + } else if (proto.hasStringStats()) { + StringColumnStatsData stringData = new StringColumnStatsData(); + stringData.setMaxColLen(proto.getStringStats().getMaxColLength()); + stringData.setAvgColLen(proto.getStringStats().getAvgColLength()); + stringData.setNumNulls(proto.getNumNulls()); + stringData.setNumDVs(proto.getNumDistinctValues()); + colData.setStringStats(stringData); + } else if (proto.hasBinaryStats()) { + BinaryColumnStatsData binaryData = new BinaryColumnStatsData(); + binaryData.setMaxColLen(proto.getBinaryStats().getMaxColLength()); + binaryData.setAvgColLen(proto.getBinaryStats().getAvgColLength()); + binaryData.setNumNulls(proto.getNumNulls()); + colData.setBinaryStats(binaryData); + } else if (proto.hasDecimalStats()) { + DecimalColumnStatsData decimalData = new DecimalColumnStatsData(); + Decimal hiVal = new Decimal(); + hiVal.setUnscaled(proto.getDecimalStats().getHighValue().getUnscaled().toByteArray()); + hiVal.setScale((short) proto.getDecimalStats().getHighValue().getScale()); + decimalData.setHighValue(hiVal); + Decimal loVal = new Decimal(); + loVal.setUnscaled(proto.getDecimalStats().getLowValue().getUnscaled().toByteArray()); + loVal.setScale((short) proto.getDecimalStats().getLowValue().getScale()); + decimalData.setLowValue(loVal); + decimalData.setNumNulls(proto.getNumNulls()); + decimalData.setNumDVs(proto.getNumDistinctValues()); + colData.setDecimalStats(decimalData); + } else { + throw new RuntimeException("Woh, bad. Unknown stats type!"); } + obj.setStatsData(colData); return obj; } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionWritable.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionWritable.java deleted file mode 100644 index 34881a3..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionWritable.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -/** - * Wrapper for {@link org.apache.hadoop.hive.metastore.api.Table} that makes it writable - */ -class PartitionWritable implements Writable { - final Partition part; - - PartitionWritable() { - this.part = new Partition(); - } - - PartitionWritable(Partition part) { - this.part = part; - } - - @Override - public void write(DataOutput out) throws IOException { - HBaseUtils.writeStrList(out, part.getValues()); - // TODO should be able to avoid dbname and tablename since they're in the key - HBaseUtils.writeStr(out, part.getDbName()); - HBaseUtils.writeStr(out, part.getTableName()); - out.writeInt(part.getCreateTime()); - out.writeInt(part.getLastAccessTime()); - new StorageDescriptorWritable(part.getSd()).write(out); - HBaseUtils.writeStrStrMap(out, part.getParameters()); - HBaseUtils.writePrivileges(out, part.getPrivileges()); - } - - @Override - public void readFields(DataInput in) throws IOException { - part.setValues(HBaseUtils.readStrList(in)); - part.setDbName(HBaseUtils.readStr(in)); - part.setTableName(HBaseUtils.readStr(in)); - part.setCreateTime(in.readInt()); - part.setLastAccessTime(in.readInt()); - StorageDescriptorWritable sdw = new StorageDescriptorWritable(); - sdw.readFields(in); - part.setSd(sdw.sd); - part.setParameters(HBaseUtils.readStrStrMap(in)); - part.setPrivileges(HBaseUtils.readPrivileges(in)); - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/RoleWritable.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/RoleWritable.java deleted file mode 100644 index 1ad1fe5..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/RoleWritable.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -/** - * Wrapper for {@link org.apache.hadoop.hive.metastore.api.Table} that makes it writable - */ -class RoleWritable implements Writable { - final Role role; - - RoleWritable() { - this.role = new Role(); - } - - RoleWritable(Role role) { - this.role = role; - } - - @Override - public void write(DataOutput out) throws IOException { - HBaseUtils.writeStr(out, role.getRoleName()); - out.writeInt(role.getCreateTime()); - HBaseUtils.writeStr(out, role.getOwnerName()); - } - - @Override - public void readFields(DataInput in) throws IOException { - role.setRoleName(HBaseUtils.readStr(in)); - role.setCreateTime(in.readInt()); - role.setOwnerName(HBaseUtils.readStr(in)); - } -} \ No newline at end of file diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StorageDescriptorWritable.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StorageDescriptorWritable.java deleted file mode 100644 index 94a8242..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StorageDescriptorWritable.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -/** - * Wrapper for {@link org.apache.hadoop.hive.metastore.api.StorageDescriptor} to make it writable. 
- */ -public class StorageDescriptorWritable implements Writable { - static final private Log LOG = LogFactory.getLog(StorageDescriptorWritable.class.getName()); - final StorageDescriptor sd; - - StorageDescriptorWritable() { - sd = new SharedStorageDescriptor(); - } - - StorageDescriptorWritable(StorageDescriptor sd) { - this.sd = sd; - } - - @Override - public void write(DataOutput out) throws IOException { - HBaseUtils.writeStr(out, sd.getLocation()); - HBaseUtils.writeStrStrMap(out, sd.getParameters()); - byte[] hash = HBaseReadWrite.getInstance().putStorageDescriptor(sd); - out.writeInt(hash.length); - out.write(hash); - } - - @Override - public void readFields(DataInput in) throws IOException { - sd.setLocation(HBaseUtils.readStr(in)); - sd.setParameters(HBaseUtils.readStrStrMap(in)); - int len = in.readInt(); - byte[] hash = new byte[len]; - in.readFully(hash, 0, len); - ((SharedStorageDescriptor)sd).readShared(hash); - } - - -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TableWritable.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TableWritable.java deleted file mode 100644 index 71df26b..0000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TableWritable.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -/** - * Wrapper for {@link org.apache.hadoop.hive.metastore.api.Table} that makes it writable - */ -class TableWritable implements Writable { - static final private Log LOG = LogFactory.getLog(TableWritable.class.getName()); - final Table table; - - TableWritable() { - this.table = new Table(); - } - - TableWritable(Table table) { - this.table = table; - } - - @Override - public void write(DataOutput out) throws IOException { - HBaseUtils.writeStr(out, table.getTableName()); - HBaseUtils.writeStr(out, table.getDbName()); - HBaseUtils.writeStr(out, table.getOwner()); - out.writeInt(table.getCreateTime()); - out.writeInt(table.getLastAccessTime()); - out.writeInt(table.getRetention()); - new StorageDescriptorWritable(table.getSd()).write(out); - HBaseUtils.writeFieldSchemaList(out, table.getPartitionKeys()); - HBaseUtils.writeStrStrMap(out, table.getParameters()); - HBaseUtils.writeStr(out, table.getViewOriginalText()); - HBaseUtils.writeStr(out, table.getViewExpandedText()); - HBaseUtils.writeStr(out, table.getTableType()); - HBaseUtils.writePrivileges(out, table.getPrivileges()); - out.writeBoolean(table.isTemporary()); - } - - @Override - public void readFields(DataInput in) throws IOException { - table.setTableName(HBaseUtils.readStr(in)); - table.setDbName(HBaseUtils.readStr(in)); - table.setOwner(HBaseUtils.readStr(in)); - table.setCreateTime(in.readInt()); - table.setLastAccessTime(in.readInt()); - table.setRetention(in.readInt()); - StorageDescriptorWritable sdw = new StorageDescriptorWritable(); - sdw.readFields(in); - table.setSd(sdw.sd); - table.setPartitionKeys(HBaseUtils.readFieldSchemaList(in)); - table.setParameters(HBaseUtils.readStrStrMap(in)); - table.setViewOriginalText(HBaseUtils.readStr(in)); - table.setViewExpandedText(HBaseUtils.readStr(in)); - table.setTableType(HBaseUtils.readStr(in)); - table.setPrivileges(HBaseUtils.readPrivileges(in)); - table.setTemporary(in.readBoolean()); - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/VanillaHBaseConnection.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/VanillaHBaseConnection.java new file mode 100644 index 0000000..3f3f4a7 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/VanillaHBaseConnection.java @@ -0,0 +1,110 @@ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HConnectionManager; +import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hbase.client.Result; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * A pass through to a simple HBase connection. This has no transactions. 
+ */ +public class VanillaHBaseConnection implements HBaseConnection { + static final private Log LOG = LogFactory.getLog(VanillaHBaseConnection.class.getName()); + + + private HConnection conn; + private Map<String, HTableInterface> tables; + Configuration conf; + + VanillaHBaseConnection() { + } + + @Override + public void connect() throws IOException { + tables = new HashMap<String, HTableInterface>(); + if (conf == null) throw new RuntimeException("Must call setConf before connect"); + conn = HConnectionManager.createConnection(conf); + } + + @Override + public void close() throws IOException { + for (HTableInterface htab : tables.values()) htab.close(); + } + + @Override + public void beginTransaction() throws IOException { + + } + + @Override + public void commitTransaction() throws IOException { + + } + + @Override + public void rollbackTransaction() throws IOException { + + } + + @Override + public void createHBaseTable(String tableName, List<byte[]> columnFamilies) throws + IOException { + HBaseAdmin admin = new HBaseAdmin(conn); + LOG.info("Creating HBase table " + tableName); + HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName)); + for (byte[] cf : columnFamilies) { + tableDesc.addFamily(new HColumnDescriptor(cf)); + } + admin.createTable(tableDesc); + admin.close(); + } + + @Override + public HTableInterface getHBaseTable(String tableName) throws IOException { + return getHBaseTable(tableName, false); + } + + @Override + public HTableInterface getHBaseTable(String tableName, boolean force) throws IOException { + HTableInterface htab = tables.get(tableName); + if (htab == null) { + LOG.debug("Trying to connect to table " + tableName); + try { + htab = conn.getTable(tableName); + // Calling getTable doesn't actually connect to the region server, it's very light + // weight, so call something else so we actually reach out and touch the region server + // and see if the table is there. + if (force) htab.get(new Get("nosuchkey".getBytes(HBaseUtils.ENCODING))); + } catch (IOException e) { + LOG.info("Caught exception when table was missing"); + return null; + } + htab.setAutoFlushTo(false); + tables.put(tableName, htab); + } + return htab; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + @Override + public Configuration getConf() { + return conf; + } +} diff --git metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto new file mode 100644 index 0000000..80e7f09 --- /dev/null +++ metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto @@ -0,0 +1,203 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +enum PrincipalType { + USER = 0; + ROLE = 1; +} + +message ColumnStats { + + message BooleanStats { + optional int64 num_trues = 1; + optional int64 num_falses = 2; + } + + message LongStats { + optional sint64 low_value = 1; + optional sint64 high_value = 2; + } + + message DoubleStats { + optional double low_value = 1; + optional double high_value = 2; + } + + message StringStats { + optional int64 max_col_length = 1; + optional double avg_col_length = 2; + } + + message DecimalStats { + message Decimal { + required bytes unscaled = 1; + required int32 scale = 2; + } + optional Decimal low_value = 1; + optional Decimal high_value = 2; + } + + optional int64 last_analyzed = 1; + required string column_type = 2; + optional int64 num_nulls = 3; + optional int64 num_distinct_values = 4; + optional BooleanStats bool_stats = 5; + optional LongStats long_stats = 6; + optional DoubleStats double_stats = 7; + optional StringStats string_stats = 8; + optional StringStats binary_stats = 9; + optional DecimalStats decimal_stats = 10; +} + + + +message Database { + optional string description = 1; + optional string uri = 2; + optional Parameters parameters = 3; + optional PrincipalPrivilegeSet privileges = 4; + optional string owner_name = 5; + optional PrincipalType owner_type = 6; +} + +message FieldSchema { + required string name = 1; + required string type = 2; + optional string comment = 3; +} + +message ParameterEntry { + required string key = 1; + required string value = 2; +} + +message Parameters { + repeated ParameterEntry parameter = 1; +} + +message Partition { + optional int64 create_time = 1; + optional int64 last_access_time = 2; + optional string location = 3; + optional Parameters sd_parameters = 4; // storage descriptor parameters + required bytes sd_hash = 5; + optional Parameters parameters = 6; // partition parameters + // We don't support partition level privileges +} + +message PrincipalPrivilegeSetEntry { + required string principal_name = 1; + repeated PrivilegeGrantInfo privileges = 2; +} + +message PrincipalPrivilegeSet { + repeated PrincipalPrivilegeSetEntry users = 1; + repeated PrincipalPrivilegeSetEntry roles = 2; +} + +message PrivilegeGrantInfo { + optional string privilege = 1; + optional int64 create_time = 2; + optional string grantor = 3; + optional PrincipalType grantor_type = 4; + optional bool grant_option = 5; +} + +message RoleGrantInfo { + required string principal_name = 1; + required PrincipalType principal_type = 2; + optional int64 add_time = 3; + optional string grantor = 4; + optional PrincipalType grantor_type = 5; + optional bool grant_option = 6; +} + +message RoleGrantInfoList { + repeated RoleGrantInfo grant_info = 1; +} + +message RoleList { + repeated string role = 1; +} + +message Role { + optional int64 create_time = 1; + optional string owner_name = 2; +} + +message StorageDescriptor { + message Order { + required string column_name = 1; + optional sint32 order = 2 [default = 1]; + } + + message SerDeInfo { + optional string name = 1; + optional string serialization_lib = 2; + optional Parameters parameters = 3; + } + + message SkewedInfo { + message SkewedColValueList { + repeated string skewed_col_value = 1; + } + + message SkewedColValueLocationMap { + repeated string key = 1; + required string value = 2; + } + + repeated string skewed_col_names = 1; + repeated SkewedColValueList skewed_col_values = 2; + repeated SkewedColValueLocationMap skewed_col_value_location_maps = 3; + } + + 
repeated FieldSchema cols = 1; + optional string input_format = 2; + optional string output_format = 3; + optional bool is_compressed = 4; + optional sint32 num_buckets = 5; + optional SerDeInfo serde_info = 6; + repeated string bucket_cols = 7; + repeated Order sort_cols = 8; + optional SkewedInfo skewed_info = 9; + optional bool stored_as_sub_directories = 10; +} + +message Table { + optional string owner = 1; + optional int64 create_time = 2; + optional int64 last_access_time = 3; + optional int64 retention = 4; + optional string location = 5; + optional Parameters sd_parameters = 6; // storage descriptor parameters + required bytes sd_hash = 7; + repeated FieldSchema partition_keys = 8; + optional Parameters parameters = 9; + optional string view_original_text = 10; + optional string view_expanded_text = 11; + optional string table_type = 12; + optional PrincipalPrivilegeSet privileges = 13; + optional bool is_temporary = 14; +} + + + + + diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java index 9da2a10..bc99633 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hive.conf.HiveConf; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -142,8 +143,9 @@ public Boolean answer(InvocationOnMock invocation) throws Throwable { }); // Mock connection - HConnection hconn = Mockito.mock(HConnection.class); - Mockito.when(hconn.getTable(Mockito.anyString())).thenReturn(htable); + HBaseConnection hconn = Mockito.mock(HBaseConnection.class); + Mockito.when(hconn.getHBaseTable(Mockito.anyString())).thenReturn(htable); + HiveConf.setVar(conf, HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN); HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf); hbase.setConnection(hconn); HBaseStore store = new HBaseStore(); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java index 857e3f0..0680e85 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java @@ -44,9 +44,11 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; import org.apache.hadoop.hive.metastore.api.Table; @@ -63,6 +65,7 @@ import org.mockito.stubbing.Answer; import java.io.IOException; +import java.security.MessageDigest; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -137,10 +140,56 @@ public void createTable() throws Exception { String tableName = "mytable"; int startTime = 
(int)(System.currentTimeMillis() / 1000); List cols = new ArrayList(); - cols.add(new FieldSchema("col1", "int", "nocomment")); + cols.add(new FieldSchema("col1", "int", "")); SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); - StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + Map params = new HashMap(); + params.put("key", "value"); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, + serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + Table t = store.getTable("default", tableName); + Assert.assertEquals(1, t.getSd().getColsSize()); + Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); + Assert.assertEquals("", t.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp", t.getSd().getLocation()); + Assert.assertEquals("input", t.getSd().getInputFormat()); + Assert.assertEquals("output", t.getSd().getOutputFormat()); + Assert.assertFalse(t.getSd().isCompressed()); + Assert.assertEquals(17, t.getSd().getNumBuckets()); + Assert.assertEquals(1, t.getSd().getBucketColsSize()); + Assert.assertEquals("bucketcol", t.getSd().getBucketCols().get(0)); + Assert.assertEquals(1, t.getSd().getSortColsSize()); + Assert.assertEquals("sortcol", t.getSd().getSortCols().get(0).getCol()); + Assert.assertEquals(1, t.getSd().getSortCols().get(0).getOrder()); + Assert.assertEquals(1, t.getSd().getParametersSize()); + Assert.assertEquals("value", t.getSd().getParameters().get("key")); + Assert.assertEquals("me", t.getOwner()); + Assert.assertEquals("default", t.getDbName()); + Assert.assertEquals(tableName, t.getTableName()); + Assert.assertEquals(0, t.getParametersSize()); + } + + @Test + public void skewInfo() throws Exception { + String tableName = "mytable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", true, 0, serde, null, null, emptyParameters); + + Map<List<String>, String> map = new HashMap<List<String>, String>(); + map.put(Arrays.asList("col3"), "col4"); + SkewedInfo skew = new SkewedInfo(Arrays.asList("col1"), Arrays.asList(Arrays.asList("col2")), + map); + sd.setSkewedInfo(skew); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, emptyParameters, null, null, null); store.createTable(table); @@ -149,15 +198,57 @@ public void createTable() throws Exception { Assert.assertEquals(1, t.getSd().getColsSize()); Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); - Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment()); + Assert.assertEquals("", t.getSd().getCols().get(0).getComment()); Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); Assert.assertEquals("file:/tmp", t.getSd().getLocation()); Assert.assertEquals("input",
t.getSd().getInputFormat()); Assert.assertEquals("output", t.getSd().getOutputFormat()); + Assert.assertTrue(t.getSd().isCompressed()); + Assert.assertEquals(0, t.getSd().getNumBuckets()); + Assert.assertEquals(0, t.getSd().getSortColsSize()); Assert.assertEquals("me", t.getOwner()); Assert.assertEquals("default", t.getDbName()); Assert.assertEquals(tableName, t.getTableName()); + Assert.assertEquals(0, t.getParametersSize()); + + skew = t.getSd().getSkewedInfo(); + Assert.assertNotNull(skew); + Assert.assertEquals(1, skew.getSkewedColNamesSize()); + Assert.assertEquals("col1", skew.getSkewedColNames().get(0)); + Assert.assertEquals(1, skew.getSkewedColValuesSize()); + Assert.assertEquals("col2", skew.getSkewedColValues().get(0).get(0)); + Assert.assertEquals(1, skew.getSkewedColValueLocationMapsSize()); + Assert.assertEquals("col4", skew.getSkewedColValueLocationMaps().get(Arrays.asList("col3"))); + + } + + @Test + public void hashSd() throws Exception { + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", true, 0, + serde, null, null, emptyParameters); + + Map<List<String>, String> map = new HashMap<List<String>, String>(); + map.put(Arrays.asList("col3"), "col4"); + SkewedInfo skew = new SkewedInfo(Arrays.asList("col1"), Arrays.asList(Arrays.asList("col2")), + map); + sd.setSkewedInfo(skew); + + MessageDigest md = MessageDigest.getInstance("MD5"); + byte[] baseHash = HBaseUtils.hashStorageDescriptor(sd, md); + + StorageDescriptor changeSchema = new StorageDescriptor(sd); + changeSchema.getCols().add(new FieldSchema("col2", "varchar(32)", "a comment")); + byte[] schemaHash = HBaseUtils.hashStorageDescriptor(changeSchema, md); + Assert.assertFalse(Arrays.equals(baseHash, schemaHash)); + + StorageDescriptor changeLocation = new StorageDescriptor(sd); + changeLocation.setLocation("file:/somewhere/else"); + byte[] locationHash = HBaseUtils.hashStorageDescriptor(changeLocation, md); + Assert.assertArrayEquals(baseHash, locationHash); } @Test diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseUtils.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseUtils.java deleted file mode 100644 index 95caed4..0000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseUtils.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ -package org.apache.hadoop.hive.metastore.hbase; - -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; -import org.junit.Assert; -import org.junit.Test; - -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; - -public class TestHBaseUtils { - - @Test - public void privilegeSerialization() throws Exception { - PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); - pps.setUserPrivileges(new HashMap>()); - pps.setRolePrivileges(new HashMap>()); - - pps.getUserPrivileges().put("fred", Arrays.asList(new PrivilegeGrantInfo("read", 1, "daphne", - PrincipalType.USER, true))); - pps.getUserPrivileges().put("wilma", Arrays.asList(new PrivilegeGrantInfo("write", 1, - "scooby", PrincipalType.USER, false))); - pps.getRolePrivileges().put("role1", Arrays.asList(new PrivilegeGrantInfo("exec", 1, - "shaggy", PrincipalType.ROLE, true))); - - byte[] serialized = HBaseUtils.writePrivileges(pps); - pps = HBaseUtils.readPrivileges(serialized); - - Assert.assertEquals(2, pps.getUserPrivileges().size()); - Assert.assertEquals(1, pps.getUserPrivileges().get("fred").size()); - PrivilegeGrantInfo pgi = pps.getUserPrivileges().get("fred").get(0); - Assert.assertEquals("read", pgi.getPrivilege()); - Assert.assertEquals(1, pgi.getCreateTime()); - Assert.assertEquals("daphne", pgi.getGrantor()); - Assert.assertEquals(PrincipalType.USER, pgi.getGrantorType()); - Assert.assertTrue(pgi.isGrantOption()); - - Assert.assertEquals(1, pps.getUserPrivileges().get("wilma").size()); - pgi = pps.getUserPrivileges().get("wilma").get(0); - Assert.assertEquals("write", pgi.getPrivilege()); - Assert.assertEquals(1, pgi.getCreateTime()); - Assert.assertEquals("scooby", pgi.getGrantor()); - Assert.assertEquals(PrincipalType.USER, pgi.getGrantorType()); - Assert.assertFalse(pgi.isGrantOption()); - - Assert.assertEquals(1, pps.getRolePrivileges().size()); - Assert.assertEquals(1, pps.getRolePrivileges().get("role1").size()); - pgi = pps.getRolePrivileges().get("role1").get(0); - Assert.assertEquals("exec", pgi.getPrivilege()); - Assert.assertEquals(1, pgi.getCreateTime()); - Assert.assertEquals("shaggy", pgi.getGrantor()); - Assert.assertEquals(PrincipalType.ROLE, pgi.getGrantorType()); - Assert.assertTrue(pgi.isGrantOption()); - } -} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestStatsCache.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestStatsCache.java index 25849ee..bc24daa 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestStatsCache.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestStatsCache.java @@ -96,10 +96,11 @@ public Result answer(InvocationOnMock invocation) throws Throwable { } }); - HConnection hconn = Mockito.mock(HConnection.class); - Mockito.when(hconn.getTable(Mockito.anyString())).thenReturn(htable); + HBaseConnection hconn = Mockito.mock(HBaseConnection.class); + Mockito.when(hconn.getHBaseTable(Mockito.anyString())).thenReturn(htable); HiveConf conf = new HiveConf(); conf.setIntVar(HiveConf.ConfVars.METASTORE_HBASE_CACHE_SIZE, 30); + conf.setVar(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN); hrw = HBaseReadWrite.getInstance(conf); hrw.setConnection(hconn); StatsCache.getInstance(conf).clear();
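The new hive.metastore.hbase.connection.class setting introduced above makes the HBase connection pluggable; the tests use it to point HBaseReadWrite at a mock connection, and production defaults to VanillaHBaseConnection. The following is only a minimal sketch of what an alternate implementation might look like, assuming a subclass of VanillaHBaseConnection is sufficient; the class name LoggingHBaseConnection and its timing behavior are invented for illustration and are not part of this patch.

    package org.apache.hadoop.hive.metastore.hbase;

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.client.HTableInterface;

    // Hypothetical example: delegates all connection handling to VanillaHBaseConnection
    // and just logs how long each table lookup takes.
    public class LoggingHBaseConnection extends VanillaHBaseConnection {
      private static final Log LOG = LogFactory.getLog(LoggingHBaseConnection.class);

      @Override
      public HTableInterface getHBaseTable(String tableName) throws IOException {
        long start = System.currentTimeMillis();
        HTableInterface htab = super.getHBaseTable(tableName);
        LOG.debug("getHBaseTable(" + tableName + ") took "
            + (System.currentTimeMillis() - start) + " ms");
        return htab;
      }
    }

Such a class would be selected the same way the tests select their mock connection, for example: conf.setVar(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, "org.apache.hadoop.hive.metastore.hbase.LoggingHBaseConnection");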