diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java index af60660..21f851e 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.FunctionType; +import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; @@ -63,6 +64,7 @@ private static final String[] tableNames = new String[] {"allnonparttable", "allparttable"}; private static final String[] partVals = new String[] {"na", "emea", "latam", "apac"}; private static final String[] funcNames = new String[] {"allfunc1", "allfunc2"}; + private static final String[] indexNames = new String[] {"allindex1", "allindex2"}; private static final List masterKeySeqs = new ArrayList(); @Rule @@ -146,7 +148,11 @@ public void importAll() throws Exception { } Assert.assertEquals(4, store.getPartitions(dbNames[i], tableNames[1], -1).size()); - Assert.assertEquals(2, store.getAllTables(dbNames[i]).size()); + // Including two index table + Assert.assertEquals(4, store.getAllTables(dbNames[i]).size()); + + Assert.assertEquals(2, store.getIndexes(dbNames[i], tableNames[0], -1).size()); + Assert.assertEquals(0, store.getIndexes(dbNames[i], tableNames[1], -1).size()); Assert.assertEquals(2, store.getFunctions(dbNames[i], "*").size()); for (int j = 0; j < funcNames.length; j++) { @@ -218,7 +224,11 @@ public void importOneDb() throws Exception { } Assert.assertEquals(4, store.getPartitions(dbNames[0], tableNames[1], -1).size()); - Assert.assertEquals(2, store.getAllTables(dbNames[0]).size()); + // Including two index table + Assert.assertEquals(4, store.getAllTables(dbNames[0]).size()); + + Assert.assertEquals(2, store.getIndexes(dbNames[0], tableNames[0], -1).size()); + Assert.assertEquals(0, store.getIndexes(dbNames[0], tableNames[1], -1).size()); Assert.assertEquals(2, store.getFunctions(dbNames[0], "*").size()); for (int j = 0; j < funcNames.length; j++) { @@ -323,6 +333,9 @@ public void importOneTableNonPartitioned() throws Exception { Assert.assertEquals(1, store.getAllTables(db.getName()).size()); Assert.assertNull(store.getTable(db.getName(), tableNames[1])); + List indexes = store.getIndexes(db.getName(), tableNames[0], -1); + Assert.assertEquals(2, indexes.size()); + Assert.assertEquals(0, store.getFunctions(dbNames[0], "*").size()); Assert.assertEquals(baseNumDbs + 1, store.getAllDatabases().size()); @@ -379,6 +392,9 @@ public void importOneTablePartitioned() throws Exception { Assert.assertNull(store.getTable(db.getName(), tableNames[0])); + List indexes = store.getIndexes(db.getName(), tableNames[1], -1); + Assert.assertEquals(0, indexes.size()); + Assert.assertEquals(0, store.getFunctions(dbNames[0], "*").size()); Assert.assertEquals(baseNumDbs + 1, store.getAllDatabases().size()); @@ -511,6 +527,15 @@ private void setupObjectStore(RawStore rdbms, String[] roles, String[] dbNames, PrincipalType.USER, (int) System.currentTimeMillis() / 1000, FunctionType.JAVA, Arrays.asList(new ResourceUri(ResourceType.JAR, 
"uri")))); } + + for (String indexName : indexNames) { + LOG.debug("Creating new index " + dbNames[i] + "." + tableNames[0] + "." + indexName); + String indexTableName = tableNames[0] + "__" + indexName + "__"; + rdbms.createTable(new Table(indexTableName, dbNames[i], "me", now, now, 0, sd, partCols, + emptyParameters, null, null, null)); + rdbms.addIndex(new Index(indexName, null, dbNames[i], tableNames[0], + now, now, indexTableName, sd, emptyParameters, false)); + } } for (int i = 0; i < tokenIds.length; i++) rdbms.addToken(tokenIds[i], tokens[i]); for (int i = 0; i < masterKeys.length; i++) { diff --git a/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java index 3b2d7b5..9f2a88c 100644 --- a/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java +++ b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java @@ -30773,6 +30773,1960 @@ public Builder clearIsTemporary() { // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Table) } + public interface IndexOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string indexHandlerClass = 1; + /** + * optional string indexHandlerClass = 1; + * + *
+     * reserved
+     * 
+ */ + boolean hasIndexHandlerClass(); + /** + * optional string indexHandlerClass = 1; + * + *
+     * reserved
+     * 
+ */ + java.lang.String getIndexHandlerClass(); + /** + * optional string indexHandlerClass = 1; + * + *
+     * reserved
+     * 
+ */ + com.google.protobuf.ByteString + getIndexHandlerClassBytes(); + + // required string dbName = 2; + /** + * required string dbName = 2; + */ + boolean hasDbName(); + /** + * required string dbName = 2; + */ + java.lang.String getDbName(); + /** + * required string dbName = 2; + */ + com.google.protobuf.ByteString + getDbNameBytes(); + + // required string origTableName = 3; + /** + * required string origTableName = 3; + */ + boolean hasOrigTableName(); + /** + * required string origTableName = 3; + */ + java.lang.String getOrigTableName(); + /** + * required string origTableName = 3; + */ + com.google.protobuf.ByteString + getOrigTableNameBytes(); + + // optional string location = 4; + /** + * optional string location = 4; + */ + boolean hasLocation(); + /** + * optional string location = 4; + */ + java.lang.String getLocation(); + /** + * optional string location = 4; + */ + com.google.protobuf.ByteString + getLocationBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+     * storage descriptor parameters
+     * 
+ */ + boolean hasSdParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+     * storage descriptor parameters
+     * 
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+     * storage descriptor parameters
+     * 
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder(); + + // optional int32 createTime = 6; + /** + * optional int32 createTime = 6; + */ + boolean hasCreateTime(); + /** + * optional int32 createTime = 6; + */ + int getCreateTime(); + + // optional int32 lastAccessTime = 7; + /** + * optional int32 lastAccessTime = 7; + */ + boolean hasLastAccessTime(); + /** + * optional int32 lastAccessTime = 7; + */ + int getLastAccessTime(); + + // optional string indexTableName = 8; + /** + * optional string indexTableName = 8; + */ + boolean hasIndexTableName(); + /** + * optional string indexTableName = 8; + */ + java.lang.String getIndexTableName(); + /** + * optional string indexTableName = 8; + */ + com.google.protobuf.ByteString + getIndexTableNameBytes(); + + // optional bytes sd_hash = 9; + /** + * optional bytes sd_hash = 9; + */ + boolean hasSdHash(); + /** + * optional bytes sd_hash = 9; + */ + com.google.protobuf.ByteString getSdHash(); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + boolean hasParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); + + // optional bool deferredRebuild = 11; + /** + * optional bool deferredRebuild = 11; + */ + boolean hasDeferredRebuild(); + /** + * optional bool deferredRebuild = 11; + */ + boolean getDeferredRebuild(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Index} + */ + public static final class Index extends + com.google.protobuf.GeneratedMessage + implements IndexOrBuilder { + // Use Index.newBuilder() to construct. 
+ private Index(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Index(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Index defaultInstance; + public static Index getDefaultInstance() { + return defaultInstance; + } + + public Index getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Index( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + indexHandlerClass_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + dbName_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + origTableName_ = input.readBytes(); + break; + } + case 34: { + bitField0_ |= 0x00000008; + location_ = input.readBytes(); + break; + } + case 42: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = sdParameters_.toBuilder(); + } + sdParameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(sdParameters_); + sdParameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; + break; + } + case 48: { + bitField0_ |= 0x00000020; + createTime_ = input.readInt32(); + break; + } + case 56: { + bitField0_ |= 0x00000040; + lastAccessTime_ = input.readInt32(); + break; + } + case 66: { + bitField0_ |= 0x00000080; + indexTableName_ = input.readBytes(); + break; + } + case 74: { + bitField0_ |= 0x00000100; + sdHash_ = input.readBytes(); + break; + } + case 82: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000200) == 0x00000200)) { + subBuilder = parameters_.toBuilder(); + } + parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(parameters_); + parameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000200; + break; + } + case 88: { + bitField0_ |= 0x00000400; + deferredRebuild_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Index_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Index parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Index(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string indexHandlerClass = 1; + public static final int INDEXHANDLERCLASS_FIELD_NUMBER = 1; + private java.lang.Object indexHandlerClass_; + /** + * optional string indexHandlerClass = 1; + * + *
+     * reserved
+     * 
+ */ + public boolean hasIndexHandlerClass() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string indexHandlerClass = 1; + * + *
+     * reserved
+     * 
+ */ + public java.lang.String getIndexHandlerClass() { + java.lang.Object ref = indexHandlerClass_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + indexHandlerClass_ = s; + } + return s; + } + } + /** + * optional string indexHandlerClass = 1; + * + *
+     * reserved
+     * 
+ */ + public com.google.protobuf.ByteString + getIndexHandlerClassBytes() { + java.lang.Object ref = indexHandlerClass_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + indexHandlerClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string dbName = 2; + public static final int DBNAME_FIELD_NUMBER = 2; + private java.lang.Object dbName_; + /** + * required string dbName = 2; + */ + public boolean hasDbName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string dbName = 2; + */ + public java.lang.String getDbName() { + java.lang.Object ref = dbName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + dbName_ = s; + } + return s; + } + } + /** + * required string dbName = 2; + */ + public com.google.protobuf.ByteString + getDbNameBytes() { + java.lang.Object ref = dbName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + dbName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string origTableName = 3; + public static final int ORIGTABLENAME_FIELD_NUMBER = 3; + private java.lang.Object origTableName_; + /** + * required string origTableName = 3; + */ + public boolean hasOrigTableName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string origTableName = 3; + */ + public java.lang.String getOrigTableName() { + java.lang.Object ref = origTableName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + origTableName_ = s; + } + return s; + } + } + /** + * required string origTableName = 3; + */ + public com.google.protobuf.ByteString + getOrigTableNameBytes() { + java.lang.Object ref = origTableName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + origTableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string location = 4; + public static final int LOCATION_FIELD_NUMBER = 4; + private java.lang.Object location_; + /** + * optional string location = 4; + */ + public boolean hasLocation() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional string location = 4; + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + location_ = s; + } + return s; + } + } + /** + * optional string location = 4; + */ + public com.google.protobuf.ByteString + getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + 
} + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + public static final int SD_PARAMETERS_FIELD_NUMBER = 5; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+     * storage descriptor parameters
+     * 
+ */ + public boolean hasSdParameters() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+     * storage descriptor parameters
+     * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { + return sdParameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+     * storage descriptor parameters
+     * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { + return sdParameters_; + } + + // optional int32 createTime = 6; + public static final int CREATETIME_FIELD_NUMBER = 6; + private int createTime_; + /** + * optional int32 createTime = 6; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional int32 createTime = 6; + */ + public int getCreateTime() { + return createTime_; + } + + // optional int32 lastAccessTime = 7; + public static final int LASTACCESSTIME_FIELD_NUMBER = 7; + private int lastAccessTime_; + /** + * optional int32 lastAccessTime = 7; + */ + public boolean hasLastAccessTime() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional int32 lastAccessTime = 7; + */ + public int getLastAccessTime() { + return lastAccessTime_; + } + + // optional string indexTableName = 8; + public static final int INDEXTABLENAME_FIELD_NUMBER = 8; + private java.lang.Object indexTableName_; + /** + * optional string indexTableName = 8; + */ + public boolean hasIndexTableName() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional string indexTableName = 8; + */ + public java.lang.String getIndexTableName() { + java.lang.Object ref = indexTableName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + indexTableName_ = s; + } + return s; + } + } + /** + * optional string indexTableName = 8; + */ + public com.google.protobuf.ByteString + getIndexTableNameBytes() { + java.lang.Object ref = indexTableName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + indexTableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional bytes sd_hash = 9; + public static final int SD_HASH_FIELD_NUMBER = 9; + private com.google.protobuf.ByteString sdHash_; + /** + * optional bytes sd_hash = 9; + */ + public boolean hasSdHash() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional bytes sd_hash = 9; + */ + public com.google.protobuf.ByteString getSdHash() { + return sdHash_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + public static final int PARAMETERS_FIELD_NUMBER = 10; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + return parameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + return parameters_; + } + + // optional bool deferredRebuild = 11; + public static final int DEFERREDREBUILD_FIELD_NUMBER = 11; + private boolean deferredRebuild_; + /** + * optional bool deferredRebuild = 11; + */ + public boolean hasDeferredRebuild() { + return ((bitField0_ & 0x00000400) == 
0x00000400); + } + /** + * optional bool deferredRebuild = 11; + */ + public boolean getDeferredRebuild() { + return deferredRebuild_; + } + + private void initFields() { + indexHandlerClass_ = ""; + dbName_ = ""; + origTableName_ = ""; + location_ = ""; + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + createTime_ = 0; + lastAccessTime_ = 0; + indexTableName_ = ""; + sdHash_ = com.google.protobuf.ByteString.EMPTY; + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + deferredRebuild_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasDbName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasOrigTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (hasSdParameters()) { + if (!getSdParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasParameters()) { + if (!getParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getIndexHandlerClassBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getDbNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getOrigTableNameBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, getLocationBytes()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(5, sdParameters_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeInt32(6, createTime_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeInt32(7, lastAccessTime_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeBytes(8, getIndexTableNameBytes()); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeBytes(9, sdHash_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeMessage(10, parameters_); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeBool(11, deferredRebuild_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getIndexHandlerClassBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getDbNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getOrigTableNameBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getLocationBytes()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, sdParameters_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(6, createTime_); + } + if 
(((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(7, lastAccessTime_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(8, getIndexTableNameBytes()); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(9, sdHash_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(10, parameters_); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(11, deferredRebuild_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return 
Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Index} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.IndexOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Index_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getSdParametersFieldBuilder(); + getParametersFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + indexHandlerClass_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + dbName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + origTableName_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + location_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); + if (sdParametersBuilder_ == null) { + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + sdParametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + createTime_ = 0; + bitField0_ = (bitField0_ & ~0x00000020); + lastAccessTime_ = 0; + bitField0_ = (bitField0_ & ~0x00000040); + indexTableName_ = ""; + bitField0_ = (bitField0_ & ~0x00000080); + sdHash_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000100); + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + deferredRebuild_ = false; + bitField0_ = (bitField0_ & ~0x00000400); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index getDefaultInstanceForType() { + 
return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.indexHandlerClass_ = indexHandlerClass_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.dbName_ = dbName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.origTableName_ = origTableName_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.location_ = location_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + if (sdParametersBuilder_ == null) { + result.sdParameters_ = sdParameters_; + } else { + result.sdParameters_ = sdParametersBuilder_.build(); + } + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.createTime_ = createTime_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + result.lastAccessTime_ = lastAccessTime_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000080; + } + result.indexTableName_ = indexTableName_; + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000100; + } + result.sdHash_ = sdHash_; + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000200; + } + if (parametersBuilder_ == null) { + result.parameters_ = parameters_; + } else { + result.parameters_ = parametersBuilder_.build(); + } + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000400; + } + result.deferredRebuild_ = deferredRebuild_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index.getDefaultInstance()) return this; + if (other.hasIndexHandlerClass()) { + bitField0_ |= 0x00000001; + indexHandlerClass_ = other.indexHandlerClass_; + onChanged(); + } + if (other.hasDbName()) { + bitField0_ |= 0x00000002; + dbName_ = other.dbName_; + onChanged(); + } + if (other.hasOrigTableName()) { + bitField0_ |= 0x00000004; + origTableName_ = other.origTableName_; + onChanged(); + } + if (other.hasLocation()) { + bitField0_ |= 0x00000008; + location_ = other.location_; + onChanged(); + } + if (other.hasSdParameters()) { + mergeSdParameters(other.getSdParameters()); + } + if (other.hasCreateTime()) { + setCreateTime(other.getCreateTime()); + } + if 
(other.hasLastAccessTime()) { + setLastAccessTime(other.getLastAccessTime()); + } + if (other.hasIndexTableName()) { + bitField0_ |= 0x00000080; + indexTableName_ = other.indexTableName_; + onChanged(); + } + if (other.hasSdHash()) { + setSdHash(other.getSdHash()); + } + if (other.hasParameters()) { + mergeParameters(other.getParameters()); + } + if (other.hasDeferredRebuild()) { + setDeferredRebuild(other.getDeferredRebuild()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasDbName()) { + + return false; + } + if (!hasOrigTableName()) { + + return false; + } + if (hasSdParameters()) { + if (!getSdParameters().isInitialized()) { + + return false; + } + } + if (hasParameters()) { + if (!getParameters().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Index) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string indexHandlerClass = 1; + private java.lang.Object indexHandlerClass_ = ""; + /** + * optional string indexHandlerClass = 1; + * + *
+       * reserved
+       * 
+ */ + public boolean hasIndexHandlerClass() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string indexHandlerClass = 1; + * + *
+       * reserved
+       * 
+ */ + public java.lang.String getIndexHandlerClass() { + java.lang.Object ref = indexHandlerClass_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + indexHandlerClass_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string indexHandlerClass = 1; + * + *
+       * reserved
+       * 
+ */ + public com.google.protobuf.ByteString + getIndexHandlerClassBytes() { + java.lang.Object ref = indexHandlerClass_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + indexHandlerClass_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string indexHandlerClass = 1; + * + *
+       * reserved
+       * 
+ */ + public Builder setIndexHandlerClass( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + indexHandlerClass_ = value; + onChanged(); + return this; + } + /** + * optional string indexHandlerClass = 1; + * + *
+       * reserved
+       * 
+ */ + public Builder clearIndexHandlerClass() { + bitField0_ = (bitField0_ & ~0x00000001); + indexHandlerClass_ = getDefaultInstance().getIndexHandlerClass(); + onChanged(); + return this; + } + /** + * optional string indexHandlerClass = 1; + * + *
+       * reserved
+       * 
+ */ + public Builder setIndexHandlerClassBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + indexHandlerClass_ = value; + onChanged(); + return this; + } + + // required string dbName = 2; + private java.lang.Object dbName_ = ""; + /** + * required string dbName = 2; + */ + public boolean hasDbName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string dbName = 2; + */ + public java.lang.String getDbName() { + java.lang.Object ref = dbName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + dbName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string dbName = 2; + */ + public com.google.protobuf.ByteString + getDbNameBytes() { + java.lang.Object ref = dbName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + dbName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string dbName = 2; + */ + public Builder setDbName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + dbName_ = value; + onChanged(); + return this; + } + /** + * required string dbName = 2; + */ + public Builder clearDbName() { + bitField0_ = (bitField0_ & ~0x00000002); + dbName_ = getDefaultInstance().getDbName(); + onChanged(); + return this; + } + /** + * required string dbName = 2; + */ + public Builder setDbNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + dbName_ = value; + onChanged(); + return this; + } + + // required string origTableName = 3; + private java.lang.Object origTableName_ = ""; + /** + * required string origTableName = 3; + */ + public boolean hasOrigTableName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string origTableName = 3; + */ + public java.lang.String getOrigTableName() { + java.lang.Object ref = origTableName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + origTableName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string origTableName = 3; + */ + public com.google.protobuf.ByteString + getOrigTableNameBytes() { + java.lang.Object ref = origTableName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + origTableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string origTableName = 3; + */ + public Builder setOrigTableName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + origTableName_ = value; + onChanged(); + return this; + } + /** + * required string origTableName = 3; + */ + public Builder clearOrigTableName() { + bitField0_ = (bitField0_ & ~0x00000004); + origTableName_ = getDefaultInstance().getOrigTableName(); + onChanged(); + return this; + } + /** + * required string origTableName = 3; + */ + public Builder setOrigTableNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + 
origTableName_ = value; + onChanged(); + return this; + } + + // optional string location = 4; + private java.lang.Object location_ = ""; + /** + * optional string location = 4; + */ + public boolean hasLocation() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional string location = 4; + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string location = 4; + */ + public com.google.protobuf.ByteString + getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string location = 4; + */ + public Builder setLocation( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + location_ = value; + onChanged(); + return this; + } + /** + * optional string location = 4; + */ + public Builder clearLocation() { + bitField0_ = (bitField0_ & ~0x00000008); + location_ = getDefaultInstance().getLocation(); + onChanged(); + return this; + } + /** + * optional string location = 4; + */ + public Builder setLocationBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + location_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> sdParametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public boolean hasSdParameters() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { + if (sdParametersBuilder_ == null) { + return sdParameters_; + } else { + return sdParametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder setSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (sdParametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sdParameters_ = value; + onChanged(); + } else { + sdParametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder setSdParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (sdParametersBuilder_ == null) { + sdParameters_ = builderForValue.build(); + onChanged(); + } else { + sdParametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder mergeSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (sdParametersBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010) && + sdParameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + sdParameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(sdParameters_).mergeFrom(value).buildPartial(); + } else { + sdParameters_ = value; + } + onChanged(); + } else { + sdParametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder clearSdParameters() { + if (sdParametersBuilder_ == null) { + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + sdParametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getSdParametersBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getSdParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { + if (sdParametersBuilder_ != null) { + return sdParametersBuilder_.getMessageOrBuilder(); + } else { + return sdParameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 5; + * + *
+       * storage descriptor parameters
+       * 
+ */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getSdParametersFieldBuilder() { + if (sdParametersBuilder_ == null) { + sdParametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + sdParameters_, + getParentForChildren(), + isClean()); + sdParameters_ = null; + } + return sdParametersBuilder_; + } + + // optional int32 createTime = 6; + private int createTime_ ; + /** + * optional int32 createTime = 6; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional int32 createTime = 6; + */ + public int getCreateTime() { + return createTime_; + } + /** + * optional int32 createTime = 6; + */ + public Builder setCreateTime(int value) { + bitField0_ |= 0x00000020; + createTime_ = value; + onChanged(); + return this; + } + /** + * optional int32 createTime = 6; + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000020); + createTime_ = 0; + onChanged(); + return this; + } + + // optional int32 lastAccessTime = 7; + private int lastAccessTime_ ; + /** + * optional int32 lastAccessTime = 7; + */ + public boolean hasLastAccessTime() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional int32 lastAccessTime = 7; + */ + public int getLastAccessTime() { + return lastAccessTime_; + } + /** + * optional int32 lastAccessTime = 7; + */ + public Builder setLastAccessTime(int value) { + bitField0_ |= 0x00000040; + lastAccessTime_ = value; + onChanged(); + return this; + } + /** + * optional int32 lastAccessTime = 7; + */ + public Builder clearLastAccessTime() { + bitField0_ = (bitField0_ & ~0x00000040); + lastAccessTime_ = 0; + onChanged(); + return this; + } + + // optional string indexTableName = 8; + private java.lang.Object indexTableName_ = ""; + /** + * optional string indexTableName = 8; + */ + public boolean hasIndexTableName() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional string indexTableName = 8; + */ + public java.lang.String getIndexTableName() { + java.lang.Object ref = indexTableName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + indexTableName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string indexTableName = 8; + */ + public com.google.protobuf.ByteString + getIndexTableNameBytes() { + java.lang.Object ref = indexTableName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + indexTableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string indexTableName = 8; + */ + public Builder setIndexTableName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000080; + indexTableName_ = value; + onChanged(); + return this; + } + /** + * optional string indexTableName = 8; + */ + public Builder clearIndexTableName() { + bitField0_ = (bitField0_ & ~0x00000080); + 
indexTableName_ = getDefaultInstance().getIndexTableName(); + onChanged(); + return this; + } + /** + * optional string indexTableName = 8; + */ + public Builder setIndexTableNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000080; + indexTableName_ = value; + onChanged(); + return this; + } + + // optional bytes sd_hash = 9; + private com.google.protobuf.ByteString sdHash_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes sd_hash = 9; + */ + public boolean hasSdHash() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional bytes sd_hash = 9; + */ + public com.google.protobuf.ByteString getSdHash() { + return sdHash_; + } + /** + * optional bytes sd_hash = 9; + */ + public Builder setSdHash(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000100; + sdHash_ = value; + onChanged(); + return this; + } + /** + * optional bytes sd_hash = 9; + */ + public Builder clearSdHash() { + bitField0_ = (bitField0_ & ~0x00000100); + sdHash_ = getDefaultInstance().getSdHash(); + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + if (parametersBuilder_ == null) { + return parameters_; + } else { + return parametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + parameters_ = value; + onChanged(); + } else { + parametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + public Builder setParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (parametersBuilder_ == null) { + parameters_ = builderForValue.build(); + onChanged(); + } else { + parametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (((bitField0_ & 0x00000200) == 0x00000200) && + parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + 
parameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); + } else { + parameters_ = value; + } + onChanged(); + } else { + parametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + public Builder clearParameters() { + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return getParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + if (parametersBuilder_ != null) { + return parametersBuilder_.getMessageOrBuilder(); + } else { + return parameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 10; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getParametersFieldBuilder() { + if (parametersBuilder_ == null) { + parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + parameters_, + getParentForChildren(), + isClean()); + parameters_ = null; + } + return parametersBuilder_; + } + + // optional bool deferredRebuild = 11; + private boolean deferredRebuild_ ; + /** + * optional bool deferredRebuild = 11; + */ + public boolean hasDeferredRebuild() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional bool deferredRebuild = 11; + */ + public boolean getDeferredRebuild() { + return deferredRebuild_; + } + /** + * optional bool deferredRebuild = 11; + */ + public Builder setDeferredRebuild(boolean value) { + bitField0_ |= 0x00000400; + deferredRebuild_ = value; + onChanged(); + return this; + } + /** + * optional bool deferredRebuild = 11; + */ + public Builder clearDeferredRebuild() { + bitField0_ = (bitField0_ & ~0x00000400); + deferredRebuild_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Index) + } + + static { + defaultInstance = new Index(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Index) + } + public interface PartitionKeyComparatorOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -34618,6 +36572,11 @@ public Builder removeRange(int index) { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable; private static 
com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Index_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -34787,24 +36746,32 @@ public Builder removeRange(int index) { "w_expanded_text\030\013 \001(\t\022\022\n\ntable_type\030\014 \001(" + "\t\022Q\n\nprivileges\030\r \001(\0132=.org.apache.hadoo", "p.hive.metastore.hbase.PrincipalPrivileg" + - "eSet\022\024\n\014is_temporary\030\016 \001(\010\"\353\004\n\026Partition" + - "KeyComparator\022\r\n\005names\030\001 \002(\t\022\r\n\005types\030\002 " + - "\002(\t\022S\n\002op\030\003 \003(\0132G.org.apache.hadoop.hive" + - ".metastore.hbase.PartitionKeyComparator." + - "Operator\022S\n\005range\030\004 \003(\0132D.org.apache.had" + - "oop.hive.metastore.hbase.PartitionKeyCom" + - "parator.Range\032(\n\004Mark\022\r\n\005value\030\001 \002(\t\022\021\n\t" + - "inclusive\030\002 \002(\010\032\272\001\n\005Range\022\013\n\003key\030\001 \002(\t\022R" + - "\n\005start\030\002 \001(\0132C.org.apache.hadoop.hive.m", - "etastore.hbase.PartitionKeyComparator.Ma" + - "rk\022P\n\003end\030\003 \001(\0132C.org.apache.hadoop.hive" + - ".metastore.hbase.PartitionKeyComparator." + - "Mark\032\241\001\n\010Operator\022Z\n\004type\030\001 \002(\0162L.org.ap" + - "ache.hadoop.hive.metastore.hbase.Partiti" + - "onKeyComparator.Operator.Type\022\013\n\003key\030\002 \002" + - "(\t\022\013\n\003val\030\003 \002(\t\"\037\n\004Type\022\010\n\004LIKE\020\000\022\r\n\tNOT" + - "EQUALS\020\001*#\n\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004R" + - "OLE\020\001" + "eSet\022\024\n\014is_temporary\030\016 \001(\010\"\334\002\n\005Index\022\031\n\021" + + "indexHandlerClass\030\001 \001(\t\022\016\n\006dbName\030\002 \002(\t\022" + + "\025\n\rorigTableName\030\003 \002(\t\022\020\n\010location\030\004 \001(\t" + + "\022I\n\rsd_parameters\030\005 \001(\01322.org.apache.had" + + "oop.hive.metastore.hbase.Parameters\022\022\n\nc" + + "reateTime\030\006 \001(\005\022\026\n\016lastAccessTime\030\007 \001(\005\022" + + "\026\n\016indexTableName\030\010 \001(\t\022\017\n\007sd_hash\030\t \001(\014" + + "\022F\n\nparameters\030\n \001(\01322.org.apache.hadoop" + + ".hive.metastore.hbase.Parameters\022\027\n\017defe", + "rredRebuild\030\013 \001(\010\"\353\004\n\026PartitionKeyCompar" + + "ator\022\r\n\005names\030\001 \002(\t\022\r\n\005types\030\002 \002(\t\022S\n\002op" + + "\030\003 \003(\0132G.org.apache.hadoop.hive.metastor" + + "e.hbase.PartitionKeyComparator.Operator\022" + + "S\n\005range\030\004 \003(\0132D.org.apache.hadoop.hive." + + "metastore.hbase.PartitionKeyComparator.R" + + "ange\032(\n\004Mark\022\r\n\005value\030\001 \002(\t\022\021\n\tinclusive" + + "\030\002 \002(\010\032\272\001\n\005Range\022\013\n\003key\030\001 \002(\t\022R\n\005start\030\002" + + " \001(\0132C.org.apache.hadoop.hive.metastore." 
+ + "hbase.PartitionKeyComparator.Mark\022P\n\003end", + "\030\003 \001(\0132C.org.apache.hadoop.hive.metastor" + + "e.hbase.PartitionKeyComparator.Mark\032\241\001\n\010" + + "Operator\022Z\n\004type\030\001 \002(\0162L.org.apache.hado" + + "op.hive.metastore.hbase.PartitionKeyComp" + + "arator.Operator.Type\022\013\n\003key\030\002 \002(\t\022\013\n\003val" + + "\030\003 \002(\t\"\037\n\004Type\022\010\n\004LIKE\020\000\022\r\n\tNOTEQUALS\020\001*" + + "#\n\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004ROLE\020\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -35021,8 +36988,14 @@ public Builder removeRange(int index) { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor, new java.lang.String[] { "Owner", "CreateTime", "LastAccessTime", "Retention", "Location", "SdParameters", "SdHash", "PartitionKeys", "Parameters", "ViewOriginalText", "ViewExpandedText", "TableType", "Privileges", "IsTemporary", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor = getDescriptor().getMessageTypes().get(21); + internal_static_org_apache_hadoop_hive_metastore_hbase_Index_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor, + new java.lang.String[] { "IndexHandlerClass", "DbName", "OrigTableName", "Location", "SdParameters", "CreateTime", "LastAccessTime", "IndexTableName", "SdHash", "Parameters", "DeferredRebuild", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor = + getDescriptor().getMessageTypes().get(22); internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor, diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java index 434bd9e..b005b4e 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore.hbase; import com.google.common.annotations.VisibleForTesting; + import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; @@ -35,6 +36,7 @@ import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; @@ -118,6 +120,7 @@ protected RawStore initialValue() { private List dbs; private BlockingQueue partitionedTables; private BlockingQueue tableNameQueue; + private BlockingQueue indexNameQueue; private BlockingQueue partQueue; private boolean writingToQueue, readersFinished; private boolean doKerberos, doAll; @@ -239,6 +242,7 @@ private int init(String... 
args) throws ParseException {
     // We don't want to bound the size of the table queue because we keep it all in memory
     partitionedTables = new LinkedBlockingQueue<>();
     tableNameQueue = new LinkedBlockingQueue<>();
+    indexNameQueue = new LinkedBlockingQueue<>();
     // Bound the size of this queue so we don't get too much in memory.
     partQueue = new ArrayBlockingQueue<>(parallel * 2);
@@ -263,6 +267,7 @@ void run() throws MetaException, InstantiationException, IllegalAccessException,
     if (doAll || dbsToImport != null || tablesToImport != null) {
       copyTables();
       copyPartitions();
+      copyIndexes();
     }
     if (doAll || dbsToImport != null || functionsToImport != null) {
       copyFunctions();
@@ -371,6 +376,66 @@ public void run() {
       }
     }
+  private void copyIndexes() throws MetaException, InvalidObjectException, InterruptedException {
+    screen("Copying indexes");
+
+    // Start the parallel threads that will copy the indexes
+    Thread[] copiers = new Thread[parallel];
+    writingToQueue = true;
+    for (int i = 0; i < parallel; i++) {
+      copiers[i] = new IndexCopier();
+      copiers[i].start();
+    }
+
+    // Put indexes from the databases we copied into the queue
+    for (Database db : dbs) {
+      screen("Copying indexes in database " + db.getName());
+      for (String tableName : rdbmsStore.get().getAllTables(db.getName())) {
+        for (Index index : rdbmsStore.get().getIndexes(db.getName(), tableName, -1)) {
+          indexNameQueue.put(new String[]{db.getName(), tableName, index.getIndexName()});
+        }
+      }
+    }
+
+    // Now put indexes from any specifically requested tables into the queue
+    if (tablesToImport != null) {
+      for (String compoundTableName : tablesToImport) {
+        String[] tn = compoundTableName.split("\\.");
+        if (tn.length != 2) {
+          error(compoundTableName + " not in proper form. Must be in form dbname.tablename. " +
+              "Ignoring this table and continuing.");
+        } else {
+          for (Index index : rdbmsStore.get().getIndexes(tn[0], tn[1], -1)) {
+            indexNameQueue.put(new String[]{tn[0], tn[1], index.getIndexName()});
+          }
+        }
+      }
+    }
+
+    writingToQueue = false;
+
+    // Wait until we've finished adding all the indexes
+    for (Thread copier : copiers) copier.join();
+  }
+
+  private class IndexCopier extends Thread {
+    @Override
+    public void run() {
+      while (writingToQueue || indexNameQueue.size() > 0) {
+        try {
+          String[] name = indexNameQueue.poll(1, TimeUnit.SECONDS);
+          if (name != null) {
+            Index index = rdbmsStore.get().getIndex(name[0], name[1], name[2]);
+            screen("Copying index " + name[0] + "." + name[1] + "." + name[2]);
+            hbaseStore.get().addIndex(index);
+          }
+        } catch (InterruptedException | MetaException | InvalidObjectException e) {
+          throw new RuntimeException(e);
+        }
+      }
+    }
+  }
+
   /* Partition copying is a little complex. As we went through and copied the tables we put each
    * partitioned table into a queue. We will now go through that queue and add partitions for the
    * tables. We do the finding of partitions and writing of them separately and in parallel.
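Reviewer note (not part of the patch): copyIndexes() and IndexCopier above follow the same produce-then-drain queue pattern the importer already uses for tables and partitions. A minimal, standalone sketch of that pattern, with purely illustrative names and no metastore dependencies:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class QueueDrainSketch {
  // Mirrors the importer's writingToQueue flag; volatile so the copier thread sees the update.
  private static volatile boolean writingToQueue = true;
  private static final BlockingQueue<String[]> indexNames = new LinkedBlockingQueue<>();

  public static void main(String[] args) throws InterruptedException {
    Thread copier = new Thread(() -> {
      // Keep polling while the producer is still writing or entries remain in the queue.
      while (writingToQueue || !indexNames.isEmpty()) {
        try {
          String[] name = indexNames.poll(1, TimeUnit.SECONDS);
          if (name != null) {
            System.out.println("Copying index " + name[0] + "." + name[1] + "." + name[2]);
          }
        } catch (InterruptedException e) {
          throw new RuntimeException(e);
        }
      }
    });
    copier.start();

    // Producer side: enqueue (db, table, index) triples, then signal completion and wait.
    indexNames.put(new String[]{"somedb", "sometable", "someindex1"});
    indexNames.put(new String[]{"somedb", "sometable", "someindex2"});
    writingToQueue = false;
    copier.join();
  }
}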
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java index 2860875..7901bde 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; @@ -98,6 +99,7 @@ final static String SECURITY_TABLE = "HBMS_SECURITY"; final static String SEQUENCES_TABLE = "HBMS_SEQUENCES"; final static String TABLE_TABLE = "HBMS_TBLS"; + final static String INDEX_TABLE = "HBMS_INDEX"; final static String USER_TO_ROLE_TABLE = "HBMS_USER_TO_ROLE"; final static String FILE_METADATA_TABLE = "HBMS_FILE_METADATA"; final static byte[] CATALOG_CF = "c".getBytes(HBaseUtils.ENCODING); @@ -109,7 +111,7 @@ public final static String[] tableNames = { AGGR_STATS_TABLE, DB_TABLE, FUNC_TABLE, GLOBAL_PRIVS_TABLE, PART_TABLE, USER_TO_ROLE_TABLE, ROLE_TABLE, SD_TABLE, SECURITY_TABLE, SEQUENCES_TABLE, - TABLE_TABLE, FILE_METADATA_TABLE }; + TABLE_TABLE, INDEX_TABLE, FILE_METADATA_TABLE }; public final static Map> columnFamilies = new HashMap<> (tableNames.length); static { @@ -124,6 +126,7 @@ columnFamilies.put(SECURITY_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(SEQUENCES_TABLE, Arrays.asList(CATALOG_CF)); columnFamilies.put(TABLE_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); + columnFamilies.put(INDEX_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); // Stats CF will contain PPD stats. columnFamilies.put(FILE_METADATA_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); } @@ -1745,6 +1748,119 @@ private Table getTable(String dbName, String tableName, boolean populateCache) } /********************************************************************************************** + * Index related methods + *********************************************************************************************/ + + /** + * Put an index object. This should only be called when the index is new (create index) as it + * will blindly add/increment the storage descriptor. If you are altering an existing index + * call {@link #replaceIndex} instead. 
+   * @param index index object
+   * @throws IOException
+   */
+  void putIndex(Index index) throws IOException {
+    byte[] hash = putStorageDescriptor(index.getSd());
+    byte[][] serialized = HBaseUtils.serializeIndex(index, hash);
+    store(INDEX_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]);
+  }
+
+  /**
+   * Fetch an index object
+   * @param dbName database the table is in
+   * @param origTableName original table name
+   * @param indexName index name
+   * @return Index object, or null if no such index
+   * @throws IOException
+   */
+  Index getIndex(String dbName, String origTableName, String indexName) throws IOException {
+    byte[] key = HBaseUtils.buildKey(dbName, origTableName, indexName);
+    byte[] serialized = read(INDEX_TABLE, key, CATALOG_CF, CATALOG_COL);
+    if (serialized == null) return null;
+    HBaseUtils.StorageDescriptorParts sdParts =
+        HBaseUtils.deserializeIndex(dbName, origTableName, indexName, serialized);
+    StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash);
+    HBaseUtils.assembleStorageDescriptor(sd, sdParts);
+    return sdParts.containingIndex;
+  }
+
+  /**
+   * Delete an index
+   * @param dbName name of database the index's table is in
+   * @param origTableName table the index is built on
+   * @param indexName index name
+   * @throws IOException
+   */
+  void deleteIndex(String dbName, String origTableName, String indexName) throws IOException {
+    deleteIndex(dbName, origTableName, indexName, true);
+  }
+
+  void deleteIndex(String dbName, String origTableName, String indexName, boolean decrementRefCnt)
+      throws IOException {
+    // Find the index so I can get the storage descriptor and drop it
+    if (decrementRefCnt) {
+      Index index = getIndex(dbName, origTableName, indexName);
+      decrementStorageDescriptorRefCount(index.getSd());
+    }
+    byte[] key = HBaseUtils.buildKey(dbName, origTableName, indexName);
+    delete(INDEX_TABLE, key, null, null);
+  }
+
+  /**
+   * Get a list of indexes on a table.
+   * @param dbName database the table is in
+   * @param origTableName original table name
+   * @param maxResults max indexes to fetch. If negative all indexes will be returned.
+   * @return list of indexes of the table
+   * @throws IOException
+   */
+  List<Index> scanIndexes(String dbName, String origTableName, int maxResults) throws IOException {
+    // There's no way to know whether all the indexes we are looking for are
+    // in the cache, so we would need to scan one way or another. Thus there's no value in hitting
+    // the cache for this function.
+    byte[] keyPrefix = null;
+    if (dbName != null) {
+      keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName, origTableName);
+    }
+    Iterator<Result> iter = scan(INDEX_TABLE, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix),
+        CATALOG_CF, CATALOG_COL, null);
+    List<Index> indexes = new ArrayList<>();
+    int numToFetch = maxResults < 0 ? Integer.MAX_VALUE : maxResults;
+    for (int i = 0; i < numToFetch && iter.hasNext(); i++) {
+      Result result = iter.next();
+      HBaseUtils.StorageDescriptorParts sdParts = HBaseUtils.deserializeIndex(result.getRow(),
+          result.getValue(CATALOG_CF, CATALOG_COL));
+      StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash);
+      HBaseUtils.assembleStorageDescriptor(sd, sdParts);
+      indexes.add(sdParts.containingIndex);
+    }
+    return indexes;
+  }
+
+  /**
+   * Replace an existing index.
This will also compare the storage descriptors and see if the + * reference count needs to be adjusted + * @param oldIndex old version of the index + * @param newIndex new version of the index + */ + void replaceIndex(Index oldIndex, Index newIndex) throws IOException { + byte[] hash; + byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldIndex.getSd(), md); + byte[] newHash = HBaseUtils.hashStorageDescriptor(newIndex.getSd(), md); + if (Arrays.equals(oldHash, newHash)) { + hash = oldHash; + } else { + decrementStorageDescriptorRefCount(oldIndex.getSd()); + hash = putStorageDescriptor(newIndex.getSd()); + } + byte[][] serialized = HBaseUtils.serializeIndex(newIndex, hash); + store(INDEX_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); + if (!(oldIndex.getDbName().equals(newIndex.getDbName()) && + oldIndex.getOrigTableName().equals(newIndex.getOrigTableName()) && + oldIndex.getIndexName().equals(newIndex.getIndexName()))) { + deleteIndex(oldIndex.getDbName(), oldIndex.getOrigTableName(), oldIndex.getIndexName(), false); + } + } + /********************************************************************************************** * StorageDescriptor related methods *********************************************************************************************/ diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java index d4e5da4..900badf 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java @@ -704,38 +704,128 @@ public void alterPartitions(String db_name, String tbl_name, List> @Override public boolean addIndex(Index index) throws InvalidObjectException, MetaException { - throw new UnsupportedOperationException(); + boolean commit = false; + openTransaction(); + try { + index.setDbName(HiveStringUtils.normalizeIdentifier(index.getDbName())); + index.setOrigTableName(HiveStringUtils.normalizeIdentifier(index.getOrigTableName())); + index.setIndexName(HiveStringUtils.normalizeIdentifier(index.getIndexName())); + index.setIndexTableName(HiveStringUtils.normalizeIdentifier(index.getIndexTableName())); + getHBase().putIndex(index); + commit = true; + } catch (IOException e) { + LOG.error("Unable to create index ", e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + return commit; } @Override public Index getIndex(String dbName, String origTableName, String indexName) throws MetaException { - throw new UnsupportedOperationException(); + boolean commit = false; + openTransaction(); + try { + Index index = getHBase().getIndex(HiveStringUtils.normalizeIdentifier(dbName), + HiveStringUtils.normalizeIdentifier(origTableName), + HiveStringUtils.normalizeIdentifier(indexName)); + if (index == null) { + LOG.debug("Unable to find index " + indexNameForErrorMsg(dbName, origTableName, indexName)); + } + commit = true; + return index; + } catch (IOException e) { + LOG.error("Unable to get index", e); + throw new MetaException("Error reading index " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } } @Override public boolean dropIndex(String dbName, String origTableName, String indexName) throws MetaException { - throw new UnsupportedOperationException(); + boolean commit = false; + openTransaction(); + try { + getHBase().deleteIndex(HiveStringUtils.normalizeIdentifier(dbName), + 
HiveStringUtils.normalizeIdentifier(origTableName),
+          HiveStringUtils.normalizeIdentifier(indexName));
+      commit = true;
+      return true;
+    } catch (IOException e) {
+      LOG.error("Unable to delete index", e);
+      throw new MetaException("Unable to drop index " +
+          indexNameForErrorMsg(dbName, origTableName, indexName));
+    } finally {
+      commitOrRoleBack(commit);
+    }
   }
 
   @Override
   public List<Index> getIndexes(String dbName, String origTableName, int max) throws MetaException {
-    // TODO - Index not currently supported. But I need to return an empty list or else drop
-    // table cores.
-    return new ArrayList<Index>();
+    boolean commit = false;
+    openTransaction();
+    try {
+      List<Index> indexes = getHBase().scanIndexes(HiveStringUtils.normalizeIdentifier(dbName),
+          HiveStringUtils.normalizeIdentifier(origTableName), max);
+      commit = true;
+      return indexes;
+    } catch (IOException e) {
+      LOG.error("Unable to get indexes", e);
+      throw new MetaException("Error scanning indexes");
+    } finally {
+      commitOrRoleBack(commit);
+    }
   }
 
   @Override
   public List<String> listIndexNames(String dbName, String origTableName, short max) throws MetaException {
-    throw new UnsupportedOperationException();
+    boolean commit = false;
+    openTransaction();
+    try {
+      List<Index> indexes = getHBase().scanIndexes(HiveStringUtils.normalizeIdentifier(dbName),
+          HiveStringUtils.normalizeIdentifier(origTableName), max);
+      if (indexes == null) return null;
+      List<String> names = new ArrayList<String>(indexes.size());
+      for (Index index : indexes) {
+        names.add(index.getIndexName());
+      }
+      commit = true;
+      return names;
+    } catch (IOException e) {
+      LOG.error("Unable to get indexes", e);
+      throw new MetaException("Error scanning indexes");
+    } finally {
+      commitOrRoleBack(commit);
+    }
   }
 
   @Override
   public void alterIndex(String dbname, String baseTblName, String name, Index newIndex)
       throws InvalidObjectException, MetaException {
-    throw new UnsupportedOperationException();
+    boolean commit = false;
+    openTransaction();
+    try {
+      Index newIndexCopy = newIndex.deepCopy();
+      newIndexCopy.setDbName(HiveStringUtils.normalizeIdentifier(newIndexCopy.getDbName()));
+      newIndexCopy.setOrigTableName(
+          HiveStringUtils.normalizeIdentifier(newIndexCopy.getOrigTableName()));
+      newIndexCopy.setIndexName(HiveStringUtils.normalizeIdentifier(newIndexCopy.getIndexName()));
+      getHBase().replaceIndex(getHBase().getIndex(HiveStringUtils.normalizeIdentifier(dbname),
+          HiveStringUtils.normalizeIdentifier(baseTblName),
+          HiveStringUtils.normalizeIdentifier(name)), newIndexCopy);
+      commit = true;
+    } catch (IOException e) {
+      LOG.error("Unable to alter index " + indexNameForErrorMsg(dbname, baseTblName, name), e);
+      throw new MetaException("Unable to alter index " +
+          indexNameForErrorMsg(dbname, baseTblName, name));
+    } finally {
+      commitOrRoleBack(commit);
+    }
   }
 
   @Override
@@ -2431,6 +2521,12 @@ private String partNameForErrorMsg(String dbName, String tableName, List
     return tableNameForErrorMsg(dbName, tableName) + "." + StringUtils.join(partVals, ':');
   }
 
+  // This is for building error messages only. It does not look up anything in the metastore as
+  // that may just throw another error.
+  private String indexNameForErrorMsg(String dbName, String origTableName, String indexName) {
+    return tableNameForErrorMsg(dbName, origTableName) + "." + indexName;
+  }
+
   private String buildExternalPartName(Table table, Partition part) {
     return buildExternalPartName(table, part.getValues());
   }
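Reviewer note (illustrative only, not part of the patch): with the HBaseStore methods above filled in, the standard RawStore index lifecycle should work end to end. A sketch of the call sequence that the new TestHBaseStore cases later in this patch exercise; the class name, the database/table/index names, and the passed-in StorageDescriptor are assumptions for illustration:

import java.util.Collections;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

class IndexLifecycleSketch {
  static void exercise(RawStore store, StorageDescriptor sd) throws Exception {
    int now = (int) (System.currentTimeMillis() / 1000);

    // Create an index on default.mytable, backed by the conventional <table>__<index>__ table.
    Index index = new Index("myindex", null, "default", "mytable", now, now,
        "mytable__myindex__", sd, Collections.<String, String>emptyMap(), false);
    store.addIndex(index);

    // Fetch it back, bump the last access time, and write the change through alterIndex.
    Index fetched = store.getIndex("default", "mytable", "myindex");
    fetched.setLastAccessTime(now + 10);
    store.alterIndex("default", "mytable", "myindex", fetched);

    // Enumerate the index names, then drop them.
    for (String name : store.listIndexNames("default", "mytable", (short) -1)) {
      store.dropIndex("default", "mytable", name);
    }
  }
}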
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
index e0b449b..c725748 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
@@ -51,6 +51,7 @@
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.FunctionType;
+import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.Partition;
@@ -830,18 +831,25 @@ static StorageDescriptor deserializeStorageDescriptor(byte[] serialized)
     Map<String, String> parameters;
     Partition containingPartition;
     Table containingTable;
+    Index containingIndex;
   }
 
   static void assembleStorageDescriptor(StorageDescriptor sd, StorageDescriptorParts parts) {
-    SharedStorageDescriptor ssd = new SharedStorageDescriptor();
-    ssd.setLocation(parts.location);
-    ssd.setParameters(parts.parameters);
-    ssd.setShared(sd);
+    SharedStorageDescriptor ssd = null;
+    if (sd != null) {
+      ssd = new SharedStorageDescriptor();
+      ssd.setLocation(parts.location);
+      ssd.setParameters(parts.parameters);
+      ssd.setShared(sd);
+    }
     if (parts.containingPartition != null) {
       parts.containingPartition.setSd(ssd);
     } else if (parts.containingTable != null) {
       parts.containingTable.setSd(ssd);
-    } else {
+    } else if (parts.containingIndex != null) {
+      parts.containingIndex.setSd(ssd);
+    }
+    else {
       throw new RuntimeException("Need either a partition or a table");
     }
   }
@@ -1114,6 +1122,94 @@ static StorageDescriptorParts deserializeTable(String dbName, String tableName,
     return sdParts;
   }
 
+  /**
+   * Serialize an index
+   * @param index index object
+   * @param sdHash hash that is being used as a key for the enclosed storage descriptor
+   * @return First element is the key, second is the serialized index
+   */
+  static byte[][] serializeIndex(Index index, byte[] sdHash) {
+    byte[][] result = new byte[2][];
+    result[0] = buildKey(HiveStringUtils.normalizeIdentifier(index.getDbName()),
+        HiveStringUtils.normalizeIdentifier(index.getOrigTableName()),
+        HiveStringUtils.normalizeIdentifier(index.getIndexName()));
+    HbaseMetastoreProto.Index.Builder builder = HbaseMetastoreProto.Index.newBuilder();
+    builder.setDbName(index.getDbName());
+    builder.setOrigTableName(index.getOrigTableName());
+    if (index.getSd().getLocation() != null) builder.setLocation(index.getSd().getLocation());
+    if (index.getSd().getParameters() != null) {
+      builder.setSdParameters(buildParameters(index.getSd().getParameters()));
+    }
+    if (index.getIndexHandlerClass() != null) {
+      builder.setIndexHandlerClass(index.getIndexHandlerClass());
+    }
+    if (index.getIndexTableName() != null) {
+      builder.setIndexTableName(index.getIndexTableName());
+    }
+    builder
+        .setCreateTime(index.getCreateTime())
+        .setLastAccessTime(index.getLastAccessTime())
+        .setDeferredRebuild(index.isDeferredRebuild());
+    if (index.getParameters() != null) {
+      builder.setParameters(buildParameters(index.getParameters()));
+    }
+    if (sdHash != null) {
+      builder.setSdHash(ByteString.copyFrom(sdHash));
+    }
+    result[1] = builder.build().toByteArray();
+    return result;
+  }
+
+  /**
+   * Deserialize an index. This version should be used when the index key is not already
+   * known (eg a scan).
+   * @param key the key fetched from HBase
+   * @param serialized the value fetched from HBase
+   * @return A struct that contains the index plus parts of the storage descriptor
+   */
+  static StorageDescriptorParts deserializeIndex(byte[] key, byte[] serialized)
+      throws InvalidProtocolBufferException {
+    String[] keys = deserializeKey(key);
+    return deserializeIndex(keys[0], keys[1], keys[2], serialized);
+  }
+
+  /**
+   * Deserialize an index. This version should be used when the index key is already
+   * known (eg a get).
+   * @param dbName database name
+   * @param origTableName original table name
+   * @param indexName index name
+   * @param serialized the value fetched from HBase
+   * @return A struct that contains the index plus parts of the storage descriptor
+   */
+  static StorageDescriptorParts deserializeIndex(String dbName, String origTableName,
+      String indexName, byte[] serialized)
+      throws InvalidProtocolBufferException {
+    HbaseMetastoreProto.Index proto = HbaseMetastoreProto.Index.parseFrom(serialized);
+    Index index = new Index();
+    StorageDescriptorParts sdParts = new StorageDescriptorParts();
+    sdParts.containingIndex = index;
+    index.setDbName(dbName);
+    index.setIndexName(indexName);
+    index.setOrigTableName(origTableName);
+    if (proto.hasLocation()) sdParts.location = proto.getLocation();
+    if (proto.hasSdParameters()) sdParts.parameters = buildParameters(proto.getSdParameters());
+    if (proto.hasIndexHandlerClass()) {
+      index.setIndexHandlerClass(proto.getIndexHandlerClass());
+    }
+    if (proto.hasIndexTableName()) {
+      index.setIndexTableName(proto.getIndexTableName());
+    }
+    index.setCreateTime(proto.getCreateTime());
+    index.setLastAccessTime(proto.getLastAccessTime());
+    index.setDeferredRebuild(proto.getDeferredRebuild());
+    index.setParameters(buildParameters(proto.getParameters()));
+    if (proto.hasSdHash()) {
+      sdParts.sdHash = proto.getSdHash().toByteArray();
+    }
+    return sdParts;
+  }
+
   static byte[] serializeBloomFilter(String dbName, String tableName, BloomFilter bloom) {
     long[] bitSet = bloom.getBitSet();
     List<Long> bits = new ArrayList<>(bitSet.length);
diff --git a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
index 466fdf9..6fbe36c 100644
--- a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
+++ b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
@@ -257,6 +257,20 @@ message Table {
   optional bool is_temporary = 14;
 }
 
+message Index {
+  optional string indexHandlerClass = 1; // reserved
+  required string dbName = 2;
+  required string origTableName = 3;
+  optional string location = 4;
+  optional Parameters sd_parameters = 5; // storage descriptor parameters
+  optional int32 createTime = 6;
+  optional int32 lastAccessTime = 7;
+  optional string indexTableName = 8;
+  optional bytes sd_hash = 9;
+  optional Parameters parameters = 10;
+  optional bool deferredRebuild = 11;
+}
+
 message PartitionKeyComparator {
   required string names = 1;
   required string types = 2;
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 2e1f5f4..c144246 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++
b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidInputException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -256,6 +257,12 @@ public static void dropAllStoreObjects(RawStore store) throws MetaException, Inv String db = dbs.get(i); List tbls = store.getAllTables(db); for (String tbl : tbls) { + List indexes = store.getIndexes(db, tbl, 100); + for (Index index : indexes) { + store.dropIndex(db, tbl, index.getIndexName()); + } + } + for (String tbl : tbls) { Deadline.startTimer("getPartition"); List parts = store.getPartitions(db, tbl, 100); for (Partition part : parts) { diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java index e4723f6..4894ed3 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.FunctionType; +import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; @@ -757,6 +758,132 @@ public void dropPartition() throws Exception { } @Test + public void createIndex() throws Exception { + String tableName = "mytable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + Map params = new HashMap(); + params.put("key", "value"); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, + serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + String indexName = "myindex"; + String indexTableName = tableName + "__" + indexName + "__"; + Index index = new Index(indexName, null, "default", tableName, startTime, startTime, + indexTableName, sd, emptyParameters, false); + store.addIndex(index); + + Index ind = store.getIndex("default", tableName, indexName); + Assert.assertEquals(1, ind.getSd().getColsSize()); + Assert.assertEquals("col1", ind.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", ind.getSd().getCols().get(0).getType()); + Assert.assertEquals("", ind.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", ind.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", ind.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp", ind.getSd().getLocation()); + Assert.assertEquals("input", ind.getSd().getInputFormat()); + Assert.assertEquals("output", ind.getSd().getOutputFormat()); + Assert.assertFalse(ind.getSd().isCompressed()); + Assert.assertEquals(17, 
ind.getSd().getNumBuckets()); + Assert.assertEquals(1, ind.getSd().getBucketColsSize()); + Assert.assertEquals("bucketcol", ind.getSd().getBucketCols().get(0)); + Assert.assertEquals(1, ind.getSd().getSortColsSize()); + Assert.assertEquals("sortcol", ind.getSd().getSortCols().get(0).getCol()); + Assert.assertEquals(1, ind.getSd().getSortCols().get(0).getOrder()); + Assert.assertEquals(1, ind.getSd().getParametersSize()); + Assert.assertEquals("value", ind.getSd().getParameters().get("key")); + Assert.assertEquals(indexName, ind.getIndexName()); + Assert.assertNull(ind.getIndexHandlerClass()); + Assert.assertEquals("default", ind.getDbName()); + Assert.assertEquals(tableName, ind.getOrigTableName()); + Assert.assertEquals(0, ind.getParametersSize()); + Assert.assertEquals(startTime, ind.getCreateTime()); + Assert.assertEquals(startTime, ind.getLastAccessTime()); + Assert.assertEquals(false, ind.isDeferredRebuild()); + } + + @Test + public void alterIndex() throws Exception { + String tableName = "mytable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + Map params = new HashMap(); + params.put("key", "value"); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, + serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + String indexName = "myindex"; + Index index = new Index(indexName, null, "default", tableName, startTime, startTime, + tableName + "__" + indexName + "__", sd, emptyParameters, false); + store.addIndex(index); + + startTime += 10; + index.setLastAccessTime(startTime); + store.alterIndex("default", tableName, indexName, index); + + Index ind = store.getIndex("default", tableName, indexName); + Assert.assertEquals(1, ind.getSd().getColsSize()); + Assert.assertEquals("col1", ind.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", ind.getSd().getCols().get(0).getType()); + Assert.assertEquals("", ind.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", ind.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", ind.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp", ind.getSd().getLocation()); + Assert.assertEquals("input", ind.getSd().getInputFormat()); + Assert.assertEquals("output", ind.getSd().getOutputFormat()); + Assert.assertFalse(ind.getSd().isCompressed()); + Assert.assertEquals(17, ind.getSd().getNumBuckets()); + Assert.assertEquals(1, ind.getSd().getBucketColsSize()); + Assert.assertEquals("bucketcol", ind.getSd().getBucketCols().get(0)); + Assert.assertEquals(1, ind.getSd().getSortColsSize()); + Assert.assertEquals("sortcol", ind.getSd().getSortCols().get(0).getCol()); + Assert.assertEquals(1, ind.getSd().getSortCols().get(0).getOrder()); + Assert.assertEquals(1, ind.getSd().getParametersSize()); + Assert.assertEquals("value", ind.getSd().getParameters().get("key")); + Assert.assertEquals(indexName, ind.getIndexName()); + Assert.assertNull(ind.getIndexHandlerClass()); + Assert.assertEquals("default", ind.getDbName()); + Assert.assertEquals(tableName, ind.getOrigTableName()); + Assert.assertEquals(0, ind.getParametersSize()); + Assert.assertEquals(startTime, ind.getLastAccessTime()); + Assert.assertEquals(false, 
ind.isDeferredRebuild()); + } + + @Test + public void dropIndex() throws Exception { + String tableName = "mytable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + Map params = new HashMap(); + params.put("key", "value"); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, + serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + String indexName = "myindex"; + Index index = new Index(indexName, null, "default", tableName, startTime, startTime, + tableName + "__" + indexName + "__", sd, emptyParameters, false); + store.addIndex(index); + + store.dropIndex("default", tableName, indexName); + + } + + @Test public void createRole() throws Exception { int now = (int)System.currentTimeMillis()/1000; String roleName = "myrole"; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index ab165f1..0021f81 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1259,7 +1259,7 @@ public Table getTable(final String dbName, final String tableName, if (!TableType.VIRTUAL_VIEW.toString().equals(tTable.getTableType())) { // Fix the non-printable chars Map parameters = tTable.getSd().getParameters(); - String sf = parameters.get(SERIALIZATION_FORMAT); + String sf = parameters!=null?parameters.get(SERIALIZATION_FORMAT) : null; if (sf != null) { char[] b = sf.toCharArray(); if ((b.length == 1) && (b[0] < 10)) { // ^A, ^B, ^C, ^D, \t
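Reviewer note (illustrative only, not part of the patch): the HBaseUtils.serializeIndex / deserializeIndex pair added earlier round-trips an Index through the new protobuf message, with the HBase row key built from dbname, origTableName and indexName and the storage descriptor stored separately under its hash. Those helpers and StorageDescriptorParts are package-private, so a sketch like this would have to live in org.apache.hadoop.hive.metastore.hbase; the class name is an assumption, and the caller must supply an Index whose storage descriptor is set:

package org.apache.hadoop.hive.metastore.hbase;

import org.apache.hadoop.hive.metastore.api.Index;

class IndexSerDeSketch {
  static Index roundTrip(Index in, byte[] sdHash) throws Exception {
    // serializeIndex returns {rowKey, value}; the value is the HbaseMetastoreProto.Index bytes.
    byte[][] kv = HBaseUtils.serializeIndex(in, sdHash);

    // Deserialize using the known key parts, then re-attach the storage descriptor the same way
    // HBaseReadWrite.getIndex() does (there the SD is first looked up by its sdHash).
    HBaseUtils.StorageDescriptorParts parts = HBaseUtils.deserializeIndex(
        in.getDbName(), in.getOrigTableName(), in.getIndexName(), kv[1]);
    HBaseUtils.assembleStorageDescriptor(in.getSd(), parts);
    return parts.containingIndex;
  }
}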