Index: hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java	(revision 1459141)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java	(working copy)
@@ -21,10 +21,16 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State;
 import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+import com.google.protobuf.InvalidProtocolBufferException;
+
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -58,6 +64,7 @@
    */
   private final Map<String, ZooKeeperProtos.Table.State> cache = new HashMap<String, ZooKeeperProtos.Table.State>();
+  private byte[] data;
 
   // TODO: Make it so always a table znode. Put table schema here as well as table state.
   // Have watcher on table znode so all are notified of state or schema change.
@@ -134,7 +141,100 @@
       setTableState(tableName, ZooKeeperProtos.Table.State.ENABLING);
     }
   }
+
+  public void createTableCreationStatusNode(final String tableName) throws KeeperException {
+    String tableStatusZNode = pathOfTableStatusZNode(tableName);
+    if (ZKUtil.checkExists(this.watcher, tableStatusZNode) == -1) {
+      ZKUtil.createAndFailSilent(this.watcher, tableStatusZNode);
+    }
+  }
+
+  public void cleanUpTableStatusNode(final String tableName) throws KeeperException {
+    String tableStatusZNode = pathOfTableStatusZNode(tableName);
+    ZKUtil.deleteNodeFailSilent(this.watcher, tableStatusZNode);
+  }
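+
+  /*
+   * NOTE (illustrative, not part of this patch): the TableCreationStatus
+   * message used below is assumed to be declared in ZooKeeper.proto roughly
+   * as follows; the enum values match those referenced in this class, but
+   * the exact value and field numbering here is a guess:
+   *
+   *   message TableCreationStatus {
+   *     enum State {
+   *       CREATED_TD = 0;
+   *       CREATED_REGIONINFO = 1;
+   *       MOVED_TO_ORIG_LOCATION = 2;
+   *       ADD_TO_META = 3;
+   *       ASSIGN_USER_REGIONS = 4;
+   *     }
+   *     required State state = 1;
+   *   }
+   */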
+
+  /**
+   * If the table is found in CREATINGTABLE state the in-memory state is
+   * removed. This helps in cases where CreateTable is to be retried by the
+   * client in case of failures.
+   *
+   * @param tableName
+   */
+  public void removeCreatingTable(final String tableName) {
+    synchronized (this.cache) {
+      if (isCreatingTable(tableName)) {
+        this.cache.remove(tableName);
+      }
+    }
+  }
+
+  public static Set<String> getCreatingTables(ZooKeeperWatcher zkw) throws KeeperException {
+    return getAllTables(zkw, ZooKeeperProtos.Table.State.CREATINGTABLE);
+  }
+
+  public TableCreationStatus.State getCurrentTableCreationState(String tableName)
+      throws KeeperException {
+    String tableStatusZNode = pathOfTableStatusZNode(tableName);
+    byte[] data = ZKUtil.getData(this.watcher, tableStatusZNode);
+    try {
+      ProtobufUtil.expectPBMagicPrefix(data);
+      ZooKeeperProtos.TableCreationStatus.Builder builder =
+          ZooKeeperProtos.TableCreationStatus.newBuilder();
+      int magicLen = ProtobufUtil.lengthOfPBMagic();
+      ZooKeeperProtos.TableCreationStatus t =
+          builder.mergeFrom(data, magicLen, data.length - magicLen).build();
+      return t.getState();
+    } catch (InvalidProtocolBufferException e) {
+      KeeperException ke = new KeeperException.DataInconsistencyException();
+      ke.initCause(e);
+      throw ke;
+    } catch (DeserializationException e) {
+      throw ZKUtil.convert(e);
+    }
+  }
+
+  public void setStatusCreatedTableDescriptor(final String tableName) throws NoNodeException,
+      KeeperException {
+    setStatusOfTableCreationNode(tableName, TableCreationStatus.State.CREATED_TD);
+  }
+
+  public void setStatusCreatedRegionInfo(final String tableName) throws NoNodeException,
+      KeeperException {
+    setStatusOfTableCreationNode(tableName, TableCreationStatus.State.CREATED_REGIONINFO);
+  }
+
+  public void setStatusMovedToOriginalLocation(final String tableName) throws NoNodeException,
+      KeeperException {
+    setStatusOfTableCreationNode(tableName, TableCreationStatus.State.MOVED_TO_ORIG_LOCATION);
+  }
+
+  public void setStatusToAddedToMeta(final String tableName) throws NoNodeException,
+      KeeperException {
+    setStatusOfTableCreationNode(tableName, TableCreationStatus.State.ADD_TO_META);
+  }
+
+  public void setStatusAssignUserRegions(final String tableName) throws NoNodeException,
+      KeeperException {
+    setStatusOfTableCreationNode(tableName, TableCreationStatus.State.ASSIGN_USER_REGIONS);
+  }
+
+  private void setStatusOfTableCreationNode(final String tableName,
+      final TableCreationStatus.State state) throws NoNodeException, KeeperException {
+    String tableStatusZNode = pathOfTableStatusZNode(tableName);
+    ZooKeeperProtos.TableCreationStatus.Builder builder =
+        ZooKeeperProtos.TableCreationStatus.newBuilder();
+    builder.setState(state);
+    byte[] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
+    ZKUtil.setData(this.watcher, tableStatusZNode, data);
+  }
+
+  private String pathOfTableStatusZNode(final String tableName) {
+    String tableZNode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName);
+    String tableStatusZNode = ZKUtil.joinZNode(tableZNode, tableName);
+    return tableStatusZNode;
+  }
 
   /**
    * Sets the specified table as ENABLING in zookeeper atomically
   * If the table is already in ENABLING state, no operation is performed
   * @param tableName
   * @return if the operation succeeds or not
   * @throws KeeperException unexpected zookeeper exception
   */
@@ -152,6 +252,24 @@
       return true;
     }
   }
+
+  /**
+   * Sets the specified table as CREATINGTABLE in zookeeper atomically.
+   * If the table is already in CREATINGTABLE state, no operation is performed.
+   * @param tableName
+   * @return if the operation succeeds or not
+   * @throws KeeperException unexpected zookeeper exception
+   */
+  public boolean checkAndSetCreatingTable(final String tableName)
+      throws KeeperException {
+    synchronized (this.cache) {
+      if (isCreatingTable(tableName)) {
+        return false;
+      }
+      setTableState(tableName, ZooKeeperProtos.Table.State.CREATINGTABLE);
+      return true;
+    }
+  }
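+
+  /*
+   * Illustrative call sequence (an assumption about the intended use, not
+   * code in this patch): a master-side create-table handler would be
+   * expected to drive these methods roughly as follows, re-reading
+   * getCurrentTableCreationState(tableName) on a retry so it can resume or
+   * roll back from the last recorded step:
+   *
+   *   zkTable.checkAndSetCreatingTable(tableName);
+   *   zkTable.createTableCreationStatusNode(tableName);
+   *   zkTable.setStatusCreatedTableDescriptor(tableName);  // table descriptor written
+   *   zkTable.setStatusCreatedRegionInfo(tableName);       // region info written
+   *   zkTable.setStatusMovedToOriginalLocation(tableName); // table dir moved into place
+   *   zkTable.setStatusToAddedToMeta(tableName);           // regions added to META
+   *   zkTable.setStatusAssignUserRegions(tableName);       // assignment kicked off
+   *   zkTable.cleanUpTableStatusNode(tableName);           // create completed
+   *   zkTable.removeCreatingTable(tableName);              // on failure, permit a clean retry
+   */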
 
   /**
    * Sets the specified table as ENABLING in zookeeper atomically
@@ -215,6 +333,10 @@
   public boolean isEnablingTable(final String tableName) {
     return isTableState(tableName, ZooKeeperProtos.Table.State.ENABLING);
   }
+
+  public boolean isCreatingTable(final String tableName) {
+    return isTableState(tableName, ZooKeeperProtos.Table.State.CREATINGTABLE);
+  }
 
   public boolean isEnabledTable(String tableName) {
     return isTableState(tableName, ZooKeeperProtos.Table.State.ENABLED);
@@ -381,4 +503,5 @@
     }
     return allTables;
   }
+
 }
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java	(revision 1459141)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java	(working copy)
@@ -105,6 +105,8 @@
   public String balancerZNode;
   // znode containing the lock for the tables
   public String tableLockZNode;
+  // znode that has the current state of the create table
+  public String tableCreationStatusNode;
 
   // Certain ZooKeeper nodes need to be world-readable
   public static final ArrayList<ACL> CREATOR_ALL_AND_WORLD_READABLE =
@@ -220,6 +222,9 @@
         conf.get("zookeeper.znode.balancer", "balancer"));
     tableLockZNode = ZKUtil.joinZNode(baseZNode,
         conf.get("zookeeper.znode.tableLock", "table-lock"));
+
+    tableCreationStatusNode = ZKUtil.joinZNode(tableZNode,
+        conf.get("zookeeper.znode.tableStatus", "table-status"));
   }
 
   /**
Index: hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
===================================================================
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java	(revision 1459141)
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java	(working copy)
@@ -1,5871 +1,6361 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: ZooKeeper.proto
-
-package org.apache.hadoop.hbase.protobuf.generated;
-
-public final class ZooKeeperProtos {
-  private ZooKeeperProtos() {}
-  public static void registerAllExtensions(
-      com.google.protobuf.ExtensionRegistry registry) {
-  }
-  public interface RootRegionServerOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // required .ServerName server = 1;
-    boolean hasServer();
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer();
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();
-  }
-  public static final class RootRegionServer extends
-      com.google.protobuf.GeneratedMessage
-      implements RootRegionServerOrBuilder {
-    // Use RootRegionServer.newBuilder() to construct.
- private RootRegionServer(Builder builder) { - super(builder); - } - private RootRegionServer(boolean noInit) {} - - private static final RootRegionServer defaultInstance; - public static RootRegionServer getDefaultInstance() { - return defaultInstance; - } - - public RootRegionServer getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_fieldAccessorTable; - } - - private int bitField0_; - // required .ServerName server = 1; - public static final int SERVER_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_; - public boolean hasServer() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { - return server_; - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { - return server_; - } - - private void initFields() { - server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasServer()) { - memoizedIsInitialized = 0; - return false; - } - if (!getServer().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, server_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, server_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer) obj; - - boolean result = true; - result = result && (hasServer() == other.hasServer()); - if (hasServer()) { - result = result && getServer() - .equals(other.getServer()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasServer()) { - hash = (37 * hash) + 
SERVER_FIELD_NUMBER; - hash = (53 * hash) + getServer().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServerOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getServerFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (serverBuilder_ == null) { - server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - } else { - serverBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDescriptor(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (serverBuilder_ == null) { - result.server_ = server_; - } else { - result.server_ = serverBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer) { - return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDefaultInstance()) return this; - if (other.hasServer()) { - mergeServer(other.getServer()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasServer()) { - - return false; - } - if (!getServer().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); - if (hasServer()) { - subBuilder.mergeFrom(getServer()); - } - input.readMessage(subBuilder, extensionRegistry); - setServer(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ServerName server = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_; - public boolean hasServer() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { - if (serverBuilder_ == null) { - return server_; - } else { - return serverBuilder_.getMessage(); - } - } - public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (serverBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - server_ = value; - onChanged(); - } else { - serverBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setServer( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (serverBuilder_ == null) { - server_ = builderForValue.build(); - onChanged(); - } else { - serverBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (serverBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { - server_ = - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial(); - } else { - server_ = value; - } - onChanged(); - } else { - serverBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearServer() { - if (serverBuilder_ == null) { - server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - onChanged(); - } else { - serverBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getServerFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { - if (serverBuilder_ != null) { - return serverBuilder_.getMessageOrBuilder(); - } else { - return server_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> - getServerFieldBuilder() { - if (serverBuilder_ == null) { - serverBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( - server_, - getParentForChildren(), - isClean()); - server_ = null; - } - return serverBuilder_; - } - - // @@protoc_insertion_point(builder_scope:RootRegionServer) - } - - static { - defaultInstance = new RootRegionServer(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RootRegionServer) - } - - public interface MasterOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ServerName master = 1; - boolean hasMaster(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder(); - } - public static final class Master extends - com.google.protobuf.GeneratedMessage - implements MasterOrBuilder { - // Use Master.newBuilder() to construct. 
- private Master(Builder builder) { - super(builder); - } - private Master(boolean noInit) {} - - private static final Master defaultInstance; - public static Master getDefaultInstance() { - return defaultInstance; - } - - public Master getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_fieldAccessorTable; - } - - private int bitField0_; - // required .ServerName master = 1; - public static final int MASTER_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_; - public boolean hasMaster() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() { - return master_; - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() { - return master_; - } - - private void initFields() { - master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasMaster()) { - memoizedIsInitialized = 0; - return false; - } - if (!getMaster().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, master_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, master_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) obj; - - boolean result = true; - result = result && (hasMaster() == other.hasMaster()); - if (hasMaster()) { - result = result && getMaster() - .equals(other.getMaster()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasMaster()) { - hash = (37 * hash) + MASTER_FIELD_NUMBER; - hash = (53 * hash) + getMaster().hashCode(); - } - hash = (29 * hash) + 
getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MasterOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getMasterFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (masterBuilder_ == null) { - master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - } else { - masterBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDescriptor(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (masterBuilder_ == null) { - result.master_ = master_; - } else { - result.master_ = masterBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDefaultInstance()) return this; - if (other.hasMaster()) { - 
mergeMaster(other.getMaster()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasMaster()) { - - return false; - } - if (!getMaster().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); - if (hasMaster()) { - subBuilder.mergeFrom(getMaster()); - } - input.readMessage(subBuilder, extensionRegistry); - setMaster(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ServerName master = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> masterBuilder_; - public boolean hasMaster() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() { - if (masterBuilder_ == null) { - return master_; - } else { - return masterBuilder_.getMessage(); - } - } - public Builder setMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (masterBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - master_ = value; - onChanged(); - } else { - masterBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setMaster( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (masterBuilder_ == null) { - master_ = builderForValue.build(); - onChanged(); - } else { - masterBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (masterBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - master_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { - master_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(master_).mergeFrom(value).buildPartial(); - } else { - master_ = value; - } - onChanged(); - } else { - masterBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearMaster() { - if (masterBuilder_ == null) { - master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - onChanged(); - } else { - masterBuilder_.clear(); - } - bitField0_ = 
(bitField0_ & ~0x00000001); - return this; - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getMasterBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getMasterFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() { - if (masterBuilder_ != null) { - return masterBuilder_.getMessageOrBuilder(); - } else { - return master_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> - getMasterFieldBuilder() { - if (masterBuilder_ == null) { - masterBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( - master_, - getParentForChildren(), - isClean()); - master_ = null; - } - return masterBuilder_; - } - - // @@protoc_insertion_point(builder_scope:Master) - } - - static { - defaultInstance = new Master(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:Master) - } - - public interface ClusterUpOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string startDate = 1; - boolean hasStartDate(); - String getStartDate(); - } - public static final class ClusterUp extends - com.google.protobuf.GeneratedMessage - implements ClusterUpOrBuilder { - // Use ClusterUp.newBuilder() to construct. - private ClusterUp(Builder builder) { - super(builder); - } - private ClusterUp(boolean noInit) {} - - private static final ClusterUp defaultInstance; - public static ClusterUp getDefaultInstance() { - return defaultInstance; - } - - public ClusterUp getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_fieldAccessorTable; - } - - private int bitField0_; - // required string startDate = 1; - public static final int STARTDATE_FIELD_NUMBER = 1; - private java.lang.Object startDate_; - public boolean hasStartDate() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getStartDate() { - java.lang.Object ref = startDate_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - startDate_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getStartDateBytes() { - java.lang.Object ref = startDate_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - startDate_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - startDate_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = 
memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasStartDate()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getStartDateBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getStartDateBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) obj; - - boolean result = true; - result = result && (hasStartDate() == other.hasStartDate()); - if (hasStartDate()) { - result = result && getStartDate() - .equals(other.getStartDate()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasStartDate()) { - hash = (37 * hash) + STARTDATE_FIELD_NUMBER; - hash = (53 * hash) + getStartDate().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom( - java.io.InputStream input, 
- com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUpOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - startDate_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDescriptor(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp getDefaultInstanceForType() { - return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.startDate_ = startDate_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDefaultInstance()) return this; - if (other.hasStartDate()) { - setStartDate(other.getStartDate()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasStartDate()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - startDate_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string startDate = 1; - private java.lang.Object startDate_ = ""; - public boolean hasStartDate() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getStartDate() { - java.lang.Object ref = startDate_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - startDate_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setStartDate(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - startDate_ = value; - onChanged(); - return this; - } - public Builder clearStartDate() { - bitField0_ = (bitField0_ & 
~0x00000001); - startDate_ = getDefaultInstance().getStartDate(); - onChanged(); - return this; - } - void setStartDate(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - startDate_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:ClusterUp) - } - - static { - defaultInstance = new ClusterUp(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ClusterUp) - } - - public interface RegionTransitionOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint32 eventTypeCode = 1; - boolean hasEventTypeCode(); - int getEventTypeCode(); - - // required bytes regionName = 2; - boolean hasRegionName(); - com.google.protobuf.ByteString getRegionName(); - - // required uint64 createTime = 3; - boolean hasCreateTime(); - long getCreateTime(); - - // required .ServerName serverName = 4; - boolean hasServerName(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); - - // optional bytes payload = 5; - boolean hasPayload(); - com.google.protobuf.ByteString getPayload(); - } - public static final class RegionTransition extends - com.google.protobuf.GeneratedMessage - implements RegionTransitionOrBuilder { - // Use RegionTransition.newBuilder() to construct. - private RegionTransition(Builder builder) { - super(builder); - } - private RegionTransition(boolean noInit) {} - - private static final RegionTransition defaultInstance; - public static RegionTransition getDefaultInstance() { - return defaultInstance; - } - - public RegionTransition getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_fieldAccessorTable; - } - - private int bitField0_; - // required uint32 eventTypeCode = 1; - public static final int EVENTTYPECODE_FIELD_NUMBER = 1; - private int eventTypeCode_; - public boolean hasEventTypeCode() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public int getEventTypeCode() { - return eventTypeCode_; - } - - // required bytes regionName = 2; - public static final int REGIONNAME_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString regionName_; - public boolean hasRegionName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public com.google.protobuf.ByteString getRegionName() { - return regionName_; - } - - // required uint64 createTime = 3; - public static final int CREATETIME_FIELD_NUMBER = 3; - private long createTime_; - public boolean hasCreateTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getCreateTime() { - return createTime_; - } - - // required .ServerName serverName = 4; - public static final int SERVERNAME_FIELD_NUMBER = 4; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_; - public boolean hasServerName() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { - return serverName_; - } - public 
[Removed: the RegionTransitionOrBuilder interface and the RegionTransition class: descriptor plumbing, per-field has/get accessors, initFields/isInitialized, writeTo/getSerializedSize, equals/hashCode, the full set of parseFrom/parseDelimitedFrom overloads, and the Builder (clear/clone/build/buildParsed/buildPartial, mergeFrom, a CodedInputStream parse loop, per-field setters and clearers, and a SingleFieldBuilder for the nested ServerName). The removed class encodes:

    message RegionTransition {
      required uint32 eventTypeCode = 1;
      required bytes regionName = 2;
      required uint64 createTime = 3;
      required .ServerName serverName = 4;
      optional bytes payload = 5;
    }]
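RegionTransition is the region-in-transition znode payload. A sketch of building and round-tripping one; the ServerName setters (setHostName/setPort/setStartCode) come from HBaseProtos, which is outside this patch, so treat them as assumptions, and the event code and region name are illustrative.

    import com.google.protobuf.ByteString;
    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition;

    public class RegionTransitionSketch {
      public static void main(String[] args) throws InvalidProtocolBufferException {
        ServerName sn = ServerName.newBuilder()           // assumed HBaseProtos.ServerName fields
            .setHostName("rs1.example.com")
            .setPort(60020)
            .setStartCode(1L)
            .build();
        RegionTransition rt = RegionTransition.newBuilder()
            .setEventTypeCode(3)                          // illustrative event type code
            .setRegionName(ByteString.copyFromUtf8("testtable,,1.abc123"))
            .setCreateTime(System.currentTimeMillis())
            .setServerName(sn)                            // required: build() fails without it
            .build();                                     // payload (field 5) is optional
        RegionTransition back = RegionTransition.parseFrom(rt.toByteArray());
        assert back.getEventTypeCode() == 3;
      }
    }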
[Removed: the SplitLogTaskOrBuilder interface and the SplitLogTask class: the State enum, state/serverName accessors, serialization and equals/hashCode, parseFrom/parseDelimitedFrom overloads, and the Builder with its ServerName SingleFieldBuilder. The removed class encodes:

    message SplitLogTask {
      enum State {
        UNASSIGNED = 0;
        OWNED = 1;
        RESIGNED = 2;
        DONE = 3;
        ERR = 4;
      }
      required State state = 1;
      required .ServerName serverName = 2;
    }]
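SplitLogTask represents a distributed log-splitting task claim; the State enum carries the handoff protocol (UNASSIGNED, OWNED, RESIGNED, DONE, ERR). A sketch of a worker claiming and completing a task, reusing the assumed ServerName setters from the previous sketch.

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask;

    public class SplitLogTaskSketch {
      public static void main(String[] args) throws Exception {
        ServerName worker = ServerName.newBuilder()       // assumed ServerName fields, as above
            .setHostName("rs1.example.com").setPort(60020).setStartCode(1L).build();
        // A worker marks the task OWNED under its own name...
        SplitLogTask owned = SplitLogTask.newBuilder()
            .setState(SplitLogTask.State.OWNED)
            .setServerName(worker)
            .build();
        // ...and later flips it to DONE (or RESIGNED/ERR) by rewriting the znode.
        SplitLogTask done = owned.toBuilder().setState(SplitLogTask.State.DONE).build();
        assert done.getState() == SplitLogTask.State.DONE;
      }
    }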
[Removed: the TableOrBuilder interface and the Table class: the State enum, state accessors, serialization and equals/hashCode, parseFrom/parseDelimitedFrom overloads, and the Builder. The removed class encodes:

    message Table {
      enum State {
        ENABLED = 0;
        DISABLED = 1;
        DISABLING = 2;
        ENABLING = 3;
      }
      required State state = 1 [default = ENABLED];
    }]
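Table is the table-state znode payload; its state field defaults to ENABLED in the schema. A round-trip sketch using only members visible in the removed code:

    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

    public class TableStateSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeperProtos.Table t = ZooKeeperProtos.Table.newBuilder()
            .setState(ZooKeeperProtos.Table.State.DISABLING) // state is required at build time
            .build();
        byte[] bytes = t.toByteArray();
        ZooKeeperProtos.Table back = ZooKeeperProtos.Table.parseFrom(bytes);
        assert back.getState() == ZooKeeperProtos.Table.State.DISABLING;
      }
    }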
- private ReplicationPeer(Builder builder) { - super(builder); - } - private ReplicationPeer(boolean noInit) {} - - private static final ReplicationPeer defaultInstance; - public static ReplicationPeer getDefaultInstance() { - return defaultInstance; - } - - public ReplicationPeer getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationPeer_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationPeer_fieldAccessorTable; - } - - private int bitField0_; - // required string clusterkey = 1; - public static final int CLUSTERKEY_FIELD_NUMBER = 1; - private java.lang.Object clusterkey_; - public boolean hasClusterkey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getClusterkey() { - java.lang.Object ref = clusterkey_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - clusterkey_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getClusterkeyBytes() { - java.lang.Object ref = clusterkey_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - clusterkey_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - clusterkey_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasClusterkey()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getClusterkeyBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getClusterkeyBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer) obj; - - boolean result = true; - result = result && (hasClusterkey() == other.hasClusterkey()); - if (hasClusterkey()) { - result = result && getClusterkey() - .equals(other.getClusterkey()); - } - 
result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasClusterkey()) { - hash = (37 * hash) + CLUSTERKEY_FIELD_NUMBER; - hash = (53 * hash) + getClusterkey().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationPeer_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationPeer_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - clusterkey_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDescriptor(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.clusterkey_ = clusterkey_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer) { - return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance()) return this; - if (other.hasClusterkey()) { - setClusterkey(other.getClusterkey()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasClusterkey()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - clusterkey_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string clusterkey = 1; - private java.lang.Object clusterkey_ = ""; - public boolean hasClusterkey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getClusterkey() { - java.lang.Object ref = clusterkey_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - clusterkey_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setClusterkey(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - clusterkey_ = value; - onChanged(); - return this; - } - public Builder clearClusterkey() { - bitField0_ = (bitField0_ & ~0x00000001); - clusterkey_ = getDefaultInstance().getClusterkey(); - onChanged(); - return this; - } - void setClusterkey(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - clusterkey_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:ReplicationPeer) - } - - static { - defaultInstance = new ReplicationPeer(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ReplicationPeer) - } - - public interface ReplicationStateOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ReplicationState.State state = 1; - boolean hasState(); - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State getState(); - } - public static final class ReplicationState extends - com.google.protobuf.GeneratedMessage - implements ReplicationStateOrBuilder { - // Use ReplicationState.newBuilder() to construct. 
- private ReplicationState(Builder builder) { - super(builder); - } - private ReplicationState(boolean noInit) {} - - private static final ReplicationState defaultInstance; - public static ReplicationState getDefaultInstance() { - return defaultInstance; - } - - public ReplicationState getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationState_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationState_fieldAccessorTable; - } - - public enum State - implements com.google.protobuf.ProtocolMessageEnum { - ENABLED(0, 0), - DISABLED(1, 1), - ; - - public static final int ENABLED_VALUE = 0; - public static final int DISABLED_VALUE = 1; - - - public final int getNumber() { return value; } - - public static State valueOf(int value) { - switch (value) { - case 0: return ENABLED; - case 1: return DISABLED; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public State findValueByNumber(int number) { - return State.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDescriptor().getEnumTypes().get(0); - } - - private static final State[] VALUES = { - ENABLED, DISABLED, - }; - - public static State valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private State(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:ReplicationState.State) - } - - private int bitField0_; - // required .ReplicationState.State state = 1; - public static final int STATE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State state_; - public boolean hasState() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State getState() { - return state_; - } - - private void initFields() { - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasState()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { 
- getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, state_.getNumber()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, state_.getNumber()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState) obj; - - boolean result = true; - result = result && (hasState() == other.hasState()); - if (hasState()) { - result = result && - (getState() == other.getState()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasState()) { - hash = (37 * hash) + STATE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getState()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState 
parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationStateOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationState_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationState_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDescriptor(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDefaultInstance(); - } - - public 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.state_ = state_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDefaultInstance()) return this; - if (other.hasState()) { - setState(other.getState()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasState()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - state_ = value; - } - break; - } - } - } - } - - private int bitField0_; - - // required .ReplicationState.State state = 1; - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED; - public boolean hasState() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State getState() { - 
return state_; - } - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - state_ = value; - onChanged(); - return this; - } - public Builder clearState() { - bitField0_ = (bitField0_ & ~0x00000001); - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:ReplicationState) - } - - static { - defaultInstance = new ReplicationState(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ReplicationState) - } - - public interface ReplicationHLogPositionOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required int64 position = 1; - boolean hasPosition(); - long getPosition(); - } - public static final class ReplicationHLogPosition extends - com.google.protobuf.GeneratedMessage - implements ReplicationHLogPositionOrBuilder { - // Use ReplicationHLogPosition.newBuilder() to construct. - private ReplicationHLogPosition(Builder builder) { - super(builder); - } - private ReplicationHLogPosition(boolean noInit) {} - - private static final ReplicationHLogPosition defaultInstance; - public static ReplicationHLogPosition getDefaultInstance() { - return defaultInstance; - } - - public ReplicationHLogPosition getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationHLogPosition_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationHLogPosition_fieldAccessorTable; - } - - private int bitField0_; - // required int64 position = 1; - public static final int POSITION_FIELD_NUMBER = 1; - private long position_; - public boolean hasPosition() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getPosition() { - return position_; - } - - private void initFields() { - position_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasPosition()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, position_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, position_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return 
true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition) obj; - - boolean result = true; - result = result && (hasPosition() == other.hasPosition()); - if (hasPosition()) { - result = result && (getPosition() - == other.getPosition()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasPosition()) { - hash = (37 * hash) + POSITION_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getPosition()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom( - com.google.protobuf.CodedInputStream input) - throws 
java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPositionOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationHLogPosition_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationHLogPosition_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - position_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.getDescriptor(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.position_ = position_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.getDefaultInstance()) return this; - if (other.hasPosition()) { - setPosition(other.getPosition()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasPosition()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - position_ = input.readInt64(); - break; - } - } - } - } - - private int bitField0_; - - // required int64 position = 1; - private long position_ ; - public boolean hasPosition() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getPosition() { - return position_; - } - public Builder setPosition(long value) { - bitField0_ |= 0x00000001; - position_ = value; - onChanged(); - return this; - } - public Builder clearPosition() { - bitField0_ = (bitField0_ & ~0x00000001); - position_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:ReplicationHLogPosition) - } - - static { - defaultInstance = new ReplicationHLogPosition(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ReplicationHLogPosition) - } - - public interface ReplicationLockOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string lockOwner = 1; - boolean hasLockOwner(); - String getLockOwner(); - } - public static final class ReplicationLock extends - com.google.protobuf.GeneratedMessage - implements ReplicationLockOrBuilder { - // Use ReplicationLock.newBuilder() to construct. 
- private ReplicationLock(Builder builder) { - super(builder); - } - private ReplicationLock(boolean noInit) {} - - private static final ReplicationLock defaultInstance; - public static ReplicationLock getDefaultInstance() { - return defaultInstance; - } - - public ReplicationLock getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationLock_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationLock_fieldAccessorTable; - } - - private int bitField0_; - // required string lockOwner = 1; - public static final int LOCKOWNER_FIELD_NUMBER = 1; - private java.lang.Object lockOwner_; - public boolean hasLockOwner() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getLockOwner() { - java.lang.Object ref = lockOwner_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - lockOwner_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getLockOwnerBytes() { - java.lang.Object ref = lockOwner_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - lockOwner_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - lockOwner_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasLockOwner()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getLockOwnerBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getLockOwnerBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock) obj; - - boolean result = true; - result = result && (hasLockOwner() == other.hasLockOwner()); - if (hasLockOwner()) { - result = result && getLockOwner() - .equals(other.getLockOwner()); - } - result = result && - 
getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasLockOwner()) { - hash = (37 * hash) + LOCKOWNER_FIELD_NUMBER; - hash = (53 * hash) + getLockOwner().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLockOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationLock_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationLock_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - lockOwner_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.getDescriptor(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.lockOwner_ = lockOwner_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock) { - return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.getDefaultInstance()) return this; - if (other.hasLockOwner()) { - setLockOwner(other.getLockOwner()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasLockOwner()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - lockOwner_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // required string lockOwner = 1; - private java.lang.Object lockOwner_ = ""; - public boolean hasLockOwner() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getLockOwner() { - java.lang.Object ref = lockOwner_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - lockOwner_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setLockOwner(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - lockOwner_ = value; - onChanged(); - return this; - } - public Builder clearLockOwner() { - bitField0_ = (bitField0_ & ~0x00000001); - lockOwner_ = getDefaultInstance().getLockOwner(); - onChanged(); - return this; - } - void setLockOwner(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - lockOwner_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:ReplicationLock) - } - - static { - defaultInstance = new ReplicationLock(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ReplicationLock) - } - - public interface TableLockOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional bytes tableName = 1; - boolean hasTableName(); - com.google.protobuf.ByteString getTableName(); - - // optional .ServerName lockOwner = 2; - boolean hasLockOwner(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder(); - - // optional int64 threadId = 3; - boolean hasThreadId(); - long getThreadId(); - - // optional bool isShared = 4; - boolean hasIsShared(); - boolean getIsShared(); - - // optional string purpose = 5; - boolean hasPurpose(); - String getPurpose(); - } - public static final class TableLock extends - com.google.protobuf.GeneratedMessage - implements TableLockOrBuilder { - // Use TableLock.newBuilder() to construct. 
- private TableLock(Builder builder) { - super(builder); - } - private TableLock(boolean noInit) {} - - private static final TableLock defaultInstance; - public static TableLock getDefaultInstance() { - return defaultInstance; - } - - public TableLock getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_TableLock_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_TableLock_fieldAccessorTable; - } - - private int bitField0_; - // optional bytes tableName = 1; - public static final int TABLENAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString tableName_; - public boolean hasTableName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public com.google.protobuf.ByteString getTableName() { - return tableName_; - } - - // optional .ServerName lockOwner = 2; - public static final int LOCKOWNER_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_; - public boolean hasLockOwner() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner() { - return lockOwner_; - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder() { - return lockOwner_; - } - - // optional int64 threadId = 3; - public static final int THREADID_FIELD_NUMBER = 3; - private long threadId_; - public boolean hasThreadId() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getThreadId() { - return threadId_; - } - - // optional bool isShared = 4; - public static final int ISSHARED_FIELD_NUMBER = 4; - private boolean isShared_; - public boolean hasIsShared() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public boolean getIsShared() { - return isShared_; - } - - // optional string purpose = 5; - public static final int PURPOSE_FIELD_NUMBER = 5; - private java.lang.Object purpose_; - public boolean hasPurpose() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public String getPurpose() { - java.lang.Object ref = purpose_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - purpose_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getPurposeBytes() { - java.lang.Object ref = purpose_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - purpose_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - tableName_ = com.google.protobuf.ByteString.EMPTY; - lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - threadId_ = 0L; - isShared_ = false; - purpose_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (hasLockOwner()) { - if (!getLockOwner().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - 
memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, tableName_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, lockOwner_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt64(3, threadId_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBool(4, isShared_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBytes(5, getPurposeBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, tableName_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, lockOwner_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(3, threadId_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(4, isShared_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(5, getPurposeBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock) obj; - - boolean result = true; - result = result && (hasTableName() == other.hasTableName()); - if (hasTableName()) { - result = result && getTableName() - .equals(other.getTableName()); - } - result = result && (hasLockOwner() == other.hasLockOwner()); - if (hasLockOwner()) { - result = result && getLockOwner() - .equals(other.getLockOwner()); - } - result = result && (hasThreadId() == other.hasThreadId()); - if (hasThreadId()) { - result = result && (getThreadId() - == other.getThreadId()); - } - result = result && (hasIsShared() == other.hasIsShared()); - if (hasIsShared()) { - result = result && (getIsShared() - == other.getIsShared()); - } - result = result && (hasPurpose() == other.hasPurpose()); - if (hasPurpose()) { - result = result && getPurpose() - .equals(other.getPurpose()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - @java.lang.Override - public int hashCode() { - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasTableName()) { - hash = (37 * hash) + TABLENAME_FIELD_NUMBER; - hash = (53 * hash) + getTableName().hashCode(); - } - if (hasLockOwner()) { - hash = (37 * hash) + LOCKOWNER_FIELD_NUMBER; - hash = (53 * hash) + getLockOwner().hashCode(); - } - if (hasThreadId()) { - hash = (37 * hash) + 
THREADID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getThreadId()); - } - if (hasIsShared()) { - hash = (37 * hash) + ISSHARED_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getIsShared()); - } - if (hasPurpose()) { - hash = (37 * hash) + PURPOSE_FIELD_NUMBER; - hash = (53 * hash) + getPurpose().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock prototype) { - return 
newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLockOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_TableLock_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_TableLock_fieldAccessorTable; - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getLockOwnerFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - tableName_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - if (lockOwnerBuilder_ == null) { - lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - } else { - lockOwnerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - threadId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - isShared_ = false; - bitField0_ = (bitField0_ & ~0x00000008); - purpose_ = ""; - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDescriptor(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.tableName_ = 
tableName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (lockOwnerBuilder_ == null) { - result.lockOwner_ = lockOwner_; - } else { - result.lockOwner_ = lockOwnerBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.threadId_ = threadId_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.isShared_ = isShared_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.purpose_ = purpose_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance()) return this; - if (other.hasTableName()) { - setTableName(other.getTableName()); - } - if (other.hasLockOwner()) { - mergeLockOwner(other.getLockOwner()); - } - if (other.hasThreadId()) { - setThreadId(other.getThreadId()); - } - if (other.hasIsShared()) { - setIsShared(other.getIsShared()); - } - if (other.hasPurpose()) { - setPurpose(other.getPurpose()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (hasLockOwner()) { - if (!getLockOwner().isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - tableName_ = input.readBytes(); - break; - } - case 18: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); - if (hasLockOwner()) { - subBuilder.mergeFrom(getLockOwner()); - } - input.readMessage(subBuilder, extensionRegistry); - setLockOwner(subBuilder.buildPartial()); - break; - } - case 24: { - bitField0_ |= 0x00000004; - threadId_ = input.readInt64(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - isShared_ = input.readBool(); - break; - } - case 42: { - bitField0_ |= 0x00000010; - purpose_ = input.readBytes(); - break; - } - } - } - } - - private int bitField0_; - - // optional bytes tableName = 1; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasTableName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public com.google.protobuf.ByteString getTableName() { - return tableName_; - } - public Builder setTableName(com.google.protobuf.ByteString value) { 
- if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tableName_ = value; - onChanged(); - return this; - } - public Builder clearTableName() { - bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); - return this; - } - - // optional .ServerName lockOwner = 2; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> lockOwnerBuilder_; - public boolean hasLockOwner() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner() { - if (lockOwnerBuilder_ == null) { - return lockOwner_; - } else { - return lockOwnerBuilder_.getMessage(); - } - } - public Builder setLockOwner(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (lockOwnerBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - lockOwner_ = value; - onChanged(); - } else { - lockOwnerBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setLockOwner( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (lockOwnerBuilder_ == null) { - lockOwner_ = builderForValue.build(); - onChanged(); - } else { - lockOwnerBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeLockOwner(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (lockOwnerBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - lockOwner_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { - lockOwner_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(lockOwner_).mergeFrom(value).buildPartial(); - } else { - lockOwner_ = value; - } - onChanged(); - } else { - lockOwnerBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearLockOwner() { - if (lockOwnerBuilder_ == null) { - lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); - onChanged(); - } else { - lockOwnerBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getLockOwnerBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getLockOwnerFieldBuilder().getBuilder(); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder() { - if (lockOwnerBuilder_ != null) { - return lockOwnerBuilder_.getMessageOrBuilder(); - } else { - return lockOwner_; - } - } - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> - getLockOwnerFieldBuilder() { - if (lockOwnerBuilder_ == null) { - lockOwnerBuilder_ = new com.google.protobuf.SingleFieldBuilder< - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( - lockOwner_, - getParentForChildren(), - isClean()); - lockOwner_ = null; - } - return lockOwnerBuilder_; - } - - // optional int64 threadId = 3; - private long threadId_ ; - public boolean hasThreadId() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getThreadId() { - return threadId_; - } - public Builder setThreadId(long value) { - bitField0_ |= 0x00000004; - threadId_ = value; - onChanged(); - return this; - } - public Builder clearThreadId() { - bitField0_ = (bitField0_ & ~0x00000004); - threadId_ = 0L; - onChanged(); - return this; - } - - // optional bool isShared = 4; - private boolean isShared_ ; - public boolean hasIsShared() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public boolean getIsShared() { - return isShared_; - } - public Builder setIsShared(boolean value) { - bitField0_ |= 0x00000008; - isShared_ = value; - onChanged(); - return this; - } - public Builder clearIsShared() { - bitField0_ = (bitField0_ & ~0x00000008); - isShared_ = false; - onChanged(); - return this; - } - - // optional string purpose = 5; - private java.lang.Object purpose_ = ""; - public boolean hasPurpose() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - public String getPurpose() { - java.lang.Object ref = purpose_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - purpose_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setPurpose(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000010; - purpose_ = value; - onChanged(); - return this; - } - public Builder clearPurpose() { - bitField0_ = (bitField0_ & ~0x00000010); - purpose_ = getDefaultInstance().getPurpose(); - onChanged(); - return this; - } - void setPurpose(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000010; - purpose_ = value; - onChanged(); - } - - // @@protoc_insertion_point(builder_scope:TableLock) - } - - static { - defaultInstance = new TableLock(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:TableLock) - } - - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RootRegionServer_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RootRegionServer_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_Master_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_Master_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ClusterUp_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ClusterUp_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RegionTransition_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RegionTransition_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_SplitLogTask_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_SplitLogTask_fieldAccessorTable; - private static 
com.google.protobuf.Descriptors.Descriptor - internal_static_Table_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_Table_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ReplicationPeer_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ReplicationPeer_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ReplicationState_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ReplicationState_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ReplicationHLogPosition_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ReplicationHLogPosition_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_ReplicationLock_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ReplicationLock_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_TableLock_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_TableLock_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\017ZooKeeper.proto\032\013hbase.proto\"/\n\020RootRe" + - "gionServer\022\033\n\006server\030\001 \002(\0132\013.ServerName\"" + - "%\n\006Master\022\033\n\006master\030\001 \002(\0132\013.ServerName\"\036" + - "\n\tClusterUp\022\021\n\tstartDate\030\001 \002(\t\"\203\001\n\020Regio" + - "nTransition\022\025\n\reventTypeCode\030\001 \002(\r\022\022\n\nre" + - "gionName\030\002 \002(\014\022\022\n\ncreateTime\030\003 \002(\004\022\037\n\nse" + - "rverName\030\004 \002(\0132\013.ServerName\022\017\n\007payload\030\005" + - " \001(\014\"\230\001\n\014SplitLogTask\022\"\n\005state\030\001 \002(\0162\023.S" + - "plitLogTask.State\022\037\n\nserverName\030\002 \002(\0132\013." 
+ - "ServerName\"C\n\005State\022\016\n\nUNASSIGNED\020\000\022\t\n\005O", - "WNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\"" + - "n\n\005Table\022$\n\005state\030\001 \002(\0162\014.Table.State:\007E" + - "NABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED" + - "\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"%\n\017Repli" + - "cationPeer\022\022\n\nclusterkey\030\001 \002(\t\"^\n\020Replic" + - "ationState\022&\n\005state\030\001 \002(\0162\027.ReplicationS" + - "tate.State\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISA" + - "BLED\020\001\"+\n\027ReplicationHLogPosition\022\020\n\010pos" + - "ition\030\001 \002(\003\"$\n\017ReplicationLock\022\021\n\tlockOw" + - "ner\030\001 \002(\t\"s\n\tTableLock\022\021\n\ttableName\030\001 \001(", - "\014\022\036\n\tlockOwner\030\002 \001(\0132\013.ServerName\022\020\n\010thr" + - "eadId\030\003 \001(\003\022\020\n\010isShared\030\004 \001(\010\022\017\n\007purpose" + - "\030\005 \001(\tBE\n*org.apache.hadoop.hbase.protob" + - "uf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - internal_static_RootRegionServer_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_RootRegionServer_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RootRegionServer_descriptor, - new java.lang.String[] { "Server", }, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.class, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.Builder.class); - internal_static_Master_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_Master_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_Master_descriptor, - new java.lang.String[] { "Master", }, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.class, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.Builder.class); - internal_static_ClusterUp_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_ClusterUp_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ClusterUp_descriptor, - new java.lang.String[] { "StartDate", }, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.class, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder.class); - internal_static_RegionTransition_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_RegionTransition_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RegionTransition_descriptor, - new java.lang.String[] { "EventTypeCode", "RegionName", "CreateTime", "ServerName", "Payload", }, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.class, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder.class); - internal_static_SplitLogTask_descriptor = - getDescriptor().getMessageTypes().get(4); - internal_static_SplitLogTask_fieldAccessorTable = new - 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_SplitLogTask_descriptor, - new java.lang.String[] { "State", "ServerName", }, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.class, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.Builder.class); - internal_static_Table_descriptor = - getDescriptor().getMessageTypes().get(5); - internal_static_Table_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_Table_descriptor, - new java.lang.String[] { "State", }, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class); - internal_static_ReplicationPeer_descriptor = - getDescriptor().getMessageTypes().get(6); - internal_static_ReplicationPeer_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ReplicationPeer_descriptor, - new java.lang.String[] { "Clusterkey", }, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.class, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder.class); - internal_static_ReplicationState_descriptor = - getDescriptor().getMessageTypes().get(7); - internal_static_ReplicationState_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ReplicationState_descriptor, - new java.lang.String[] { "State", }, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.class, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.Builder.class); - internal_static_ReplicationHLogPosition_descriptor = - getDescriptor().getMessageTypes().get(8); - internal_static_ReplicationHLogPosition_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ReplicationHLogPosition_descriptor, - new java.lang.String[] { "Position", }, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.class, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.Builder.class); - internal_static_ReplicationLock_descriptor = - getDescriptor().getMessageTypes().get(9); - internal_static_ReplicationLock_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ReplicationLock_descriptor, - new java.lang.String[] { "LockOwner", }, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.class, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.Builder.class); - internal_static_TableLock_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_TableLock_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_TableLock_descriptor, - new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", }, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.class, - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder.class); - return null; - } - }; - com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), - }, assigner); - } - - // @@protoc_insertion_point(outer_class_scope) -} +// Generated by the protocol buffer compiler. 
DO NOT EDIT! +// source: ZooKeeper.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class ZooKeeperProtos { + private ZooKeeperProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface RootRegionServerOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .ServerName server = 1; + boolean hasServer(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder(); + } + public static final class RootRegionServer extends + com.google.protobuf.GeneratedMessage + implements RootRegionServerOrBuilder { + // Use RootRegionServer.newBuilder() to construct. + private RootRegionServer(Builder builder) { + super(builder); + } + private RootRegionServer(boolean noInit) {} + + private static final RootRegionServer defaultInstance; + public static RootRegionServer getDefaultInstance() { + return defaultInstance; + } + + public RootRegionServer getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_fieldAccessorTable; + } + + private int bitField0_; + // required .ServerName server = 1; + public static final int SERVER_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_; + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { + return server_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { + return server_; + } + + private void initFields() { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasServer()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, server_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, server_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final 
java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer) obj; + + boolean result = true; + result = result && (hasServer() == other.hasServer()); + if (hasServer()) { + result = result && getServer() + .equals(other.getServer()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasServer()) { + hash = (37 * hash) + SERVER_FIELD_NUMBER; + hash = (53 * hash) + getServer().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return 
newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServerOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (serverBuilder_ == null) { + result.server_ = server_; + } else { + result.server_ = serverBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDefaultInstance()) return this; + if (other.hasServer()) { + mergeServer(other.getServer()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasServer()) { + + return false; + } + if (!getServer().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); + if (hasServer()) { + subBuilder.mergeFrom(getServer()); + } + input.readMessage(subBuilder, extensionRegistry); + setServer(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .ServerName server = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_; + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { + if (serverBuilder_ == null) { + return server_; + } else { + return serverBuilder_.getMessage(); + } + } + public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + server_ = value; + onChanged(); + } else { + 
serverBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setServer( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverBuilder_ == null) { + server_ = builderForValue.build(); + onChanged(); + } else { + serverBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + server_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial(); + } else { + server_ = value; + } + onChanged(); + } else { + serverBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearServer() { + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getServerFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { + if (serverBuilder_ != null) { + return serverBuilder_.getMessageOrBuilder(); + } else { + return server_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerFieldBuilder() { + if (serverBuilder_ == null) { + serverBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + server_, + getParentForChildren(), + isClean()); + server_ = null; + } + return serverBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RootRegionServer) + } + + static { + defaultInstance = new RootRegionServer(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RootRegionServer) + } + + public interface MasterOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .ServerName master = 1; + boolean hasMaster(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder(); + } + public static final class Master extends + com.google.protobuf.GeneratedMessage + implements MasterOrBuilder { + // Use Master.newBuilder() to construct. 
+ private Master(Builder builder) { + super(builder); + } + private Master(boolean noInit) {} + + private static final Master defaultInstance; + public static Master getDefaultInstance() { + return defaultInstance; + } + + public Master getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_fieldAccessorTable; + } + + private int bitField0_; + // required .ServerName master = 1; + public static final int MASTER_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_; + public boolean hasMaster() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() { + return master_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() { + return master_; + } + + private void initFields() { + master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasMaster()) { + memoizedIsInitialized = 0; + return false; + } + if (!getMaster().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, master_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, master_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) obj; + + boolean result = true; + result = result && (hasMaster() == other.hasMaster()); + if (hasMaster()) { + result = result && getMaster() + .equals(other.getMaster()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasMaster()) { + hash = (37 * hash) + MASTER_FIELD_NUMBER; + hash = (53 * hash) + getMaster().hashCode(); + } + hash = (29 * hash) + 
getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MasterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Master_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getMasterFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (masterBuilder_ == null) { + master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + masterBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (masterBuilder_ == null) { + result.master_ = master_; + } else { + result.master_ = masterBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDefaultInstance()) return this; + if (other.hasMaster()) { + 
mergeMaster(other.getMaster()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasMaster()) { + + return false; + } + if (!getMaster().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); + if (hasMaster()) { + subBuilder.mergeFrom(getMaster()); + } + input.readMessage(subBuilder, extensionRegistry); + setMaster(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .ServerName master = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> masterBuilder_; + public boolean hasMaster() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() { + if (masterBuilder_ == null) { + return master_; + } else { + return masterBuilder_.getMessage(); + } + } + public Builder setMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (masterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + master_ = value; + onChanged(); + } else { + masterBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setMaster( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (masterBuilder_ == null) { + master_ = builderForValue.build(); + onChanged(); + } else { + masterBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (masterBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + master_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + master_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(master_).mergeFrom(value).buildPartial(); + } else { + master_ = value; + } + onChanged(); + } else { + masterBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearMaster() { + if (masterBuilder_ == null) { + master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + masterBuilder_.clear(); + } + bitField0_ = 
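// [Illustrative sketch, not part of this patch.] mergeMaster(...) above only
// does a field-by-field merge when a non-default master is already present;
// otherwise it simply adopts the incoming message. Merging two Master
// messages therefore combines their nested ServerName fields rather than
// overwriting the submessage wholesale (a and b are assumed to be previously
// built Master instances):
ZooKeeperProtos.Master merged = ZooKeeperProtos.Master.newBuilder()
    .mergeFrom(a)
    .mergeFrom(b)   // fields set in b's ServerName win, field by field
    .build();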
(bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getMasterBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getMasterFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() { + if (masterBuilder_ != null) { + return masterBuilder_.getMessageOrBuilder(); + } else { + return master_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getMasterFieldBuilder() { + if (masterBuilder_ == null) { + masterBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + master_, + getParentForChildren(), + isClean()); + master_ = null; + } + return masterBuilder_; + } + + // @@protoc_insertion_point(builder_scope:Master) + } + + static { + defaultInstance = new Master(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Master) + } + + public interface ClusterUpOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string startDate = 1; + boolean hasStartDate(); + String getStartDate(); + } + public static final class ClusterUp extends + com.google.protobuf.GeneratedMessage + implements ClusterUpOrBuilder { + // Use ClusterUp.newBuilder() to construct. + private ClusterUp(Builder builder) { + super(builder); + } + private ClusterUp(boolean noInit) {} + + private static final ClusterUp defaultInstance; + public static ClusterUp getDefaultInstance() { + return defaultInstance; + } + + public ClusterUp getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_fieldAccessorTable; + } + + private int bitField0_; + // required string startDate = 1; + public static final int STARTDATE_FIELD_NUMBER = 1; + private java.lang.Object startDate_; + public boolean hasStartDate() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getStartDate() { + java.lang.Object ref = startDate_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + startDate_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getStartDateBytes() { + java.lang.Object ref = startDate_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + startDate_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + startDate_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = 
memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasStartDate()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getStartDateBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getStartDateBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) obj; + + boolean result = true; + result = result && (hasStartDate() == other.hasStartDate()); + if (hasStartDate()) { + result = result && getStartDate() + .equals(other.getStartDate()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasStartDate()) { + hash = (37 * hash) + STARTDATE_FIELD_NUMBER; + hash = (53 * hash) + getStartDate().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom( + java.io.InputStream input, 
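// [Illustrative sketch, not part of this patch.] startDate_ is held as either
// a String or a UTF-8 ByteString and decoded lazily on first getStartDate()
// call, as the accessor above shows. A round trip, inside a method declaring
// throws InvalidProtocolBufferException:
ZooKeeperProtos.ClusterUp up = ZooKeeperProtos.ClusterUp.newBuilder()
    .setStartDate(Long.toString(System.currentTimeMillis()))
    .build();
String started =
    ZooKeeperProtos.ClusterUp.parseFrom(up.toByteArray()).getStartDate();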
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUpOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ClusterUp_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + startDate_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp getDefaultInstanceForType() { + return 
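// [Illustrative sketch, not part of this patch.] parseDelimitedFrom returns
// null at clean end-of-stream, which makes it easy to stream several
// length-prefixed messages through one pipe (inside a method declaring
// throws java.io.IOException):
java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
ZooKeeperProtos.ClusterUp.newBuilder().setStartDate("a").build().writeDelimitedTo(out);
ZooKeeperProtos.ClusterUp.newBuilder().setStartDate("b").build().writeDelimitedTo(out);
java.io.ByteArrayInputStream in = new java.io.ByteArrayInputStream(out.toByteArray());
ZooKeeperProtos.ClusterUp msg;
while ((msg = ZooKeeperProtos.ClusterUp.parseDelimitedFrom(in)) != null) {
  System.out.println(msg.getStartDate());   // prints "a" then "b"
}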
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.startDate_ = startDate_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDefaultInstance()) return this; + if (other.hasStartDate()) { + setStartDate(other.getStartDate()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasStartDate()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + startDate_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required string startDate = 1; + private java.lang.Object startDate_ = ""; + public boolean hasStartDate() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getStartDate() { + java.lang.Object ref = startDate_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + startDate_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setStartDate(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + startDate_ = value; + onChanged(); + return this; + } + public Builder clearStartDate() { + bitField0_ = (bitField0_ & 
~0x00000001); + startDate_ = getDefaultInstance().getStartDate(); + onChanged(); + return this; + } + void setStartDate(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + startDate_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:ClusterUp) + } + + static { + defaultInstance = new ClusterUp(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ClusterUp) + } + + public interface RegionTransitionOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint32 eventTypeCode = 1; + boolean hasEventTypeCode(); + int getEventTypeCode(); + + // required bytes regionName = 2; + boolean hasRegionName(); + com.google.protobuf.ByteString getRegionName(); + + // required uint64 createTime = 3; + boolean hasCreateTime(); + long getCreateTime(); + + // required .ServerName serverName = 4; + boolean hasServerName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); + + // optional bytes payload = 5; + boolean hasPayload(); + com.google.protobuf.ByteString getPayload(); + } + public static final class RegionTransition extends + com.google.protobuf.GeneratedMessage + implements RegionTransitionOrBuilder { + // Use RegionTransition.newBuilder() to construct. + private RegionTransition(Builder builder) { + super(builder); + } + private RegionTransition(boolean noInit) {} + + private static final RegionTransition defaultInstance; + public static RegionTransition getDefaultInstance() { + return defaultInstance; + } + + public RegionTransition getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_fieldAccessorTable; + } + + private int bitField0_; + // required uint32 eventTypeCode = 1; + public static final int EVENTTYPECODE_FIELD_NUMBER = 1; + private int eventTypeCode_; + public boolean hasEventTypeCode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getEventTypeCode() { + return eventTypeCode_; + } + + // required bytes regionName = 2; + public static final int REGIONNAME_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString regionName_; + public boolean hasRegionName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public com.google.protobuf.ByteString getRegionName() { + return regionName_; + } + + // required uint64 createTime = 3; + public static final int CREATETIME_FIELD_NUMBER = 3; + private long createTime_; + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getCreateTime() { + return createTime_; + } + + // required .ServerName serverName = 4; + public static final int SERVERNAME_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_; + public boolean hasServerName() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + return serverName_; + } + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + return serverName_; + } + + // optional bytes payload = 5; + public static final int PAYLOAD_FIELD_NUMBER = 5; + private com.google.protobuf.ByteString payload_; + public boolean hasPayload() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public com.google.protobuf.ByteString getPayload() { + return payload_; + } + + private void initFields() { + eventTypeCode_ = 0; + regionName_ = com.google.protobuf.ByteString.EMPTY; + createTime_ = 0L; + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + payload_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasEventTypeCode()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRegionName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasCreateTime()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasServerName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServerName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, eventTypeCode_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, regionName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, createTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, serverName_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, payload_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, eventTypeCode_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, regionName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, createTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, serverName_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, payload_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition other = 
(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition) obj; + + boolean result = true; + result = result && (hasEventTypeCode() == other.hasEventTypeCode()); + if (hasEventTypeCode()) { + result = result && (getEventTypeCode() + == other.getEventTypeCode()); + } + result = result && (hasRegionName() == other.hasRegionName()); + if (hasRegionName()) { + result = result && getRegionName() + .equals(other.getRegionName()); + } + result = result && (hasCreateTime() == other.hasCreateTime()); + if (hasCreateTime()) { + result = result && (getCreateTime() + == other.getCreateTime()); + } + result = result && (hasServerName() == other.hasServerName()); + if (hasServerName()) { + result = result && getServerName() + .equals(other.getServerName()); + } + result = result && (hasPayload() == other.hasPayload()); + if (hasPayload()) { + result = result && getPayload() + .equals(other.getPayload()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasEventTypeCode()) { + hash = (37 * hash) + EVENTTYPECODE_FIELD_NUMBER; + hash = (53 * hash) + getEventTypeCode(); + } + if (hasRegionName()) { + hash = (37 * hash) + REGIONNAME_FIELD_NUMBER; + hash = (53 * hash) + getRegionName().hashCode(); + } + if (hasCreateTime()) { + hash = (37 * hash) + CREATETIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCreateTime()); + } + if (hasServerName()) { + hash = (37 * hash) + SERVERNAME_FIELD_NUMBER; + hash = (53 * hash) + getServerName().hashCode(); + } + if (hasPayload()) { + hash = (37 * hash) + PAYLOAD_FIELD_NUMBER; + hash = (53 * hash) + getPayload().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static 
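// [Illustrative sketch, not part of this patch.] The generated equals/hashCode
// above give RegionTransition value semantics over the fields that are
// actually set, so two independently built messages with identical field
// values compare equal (assumes ServerName only requires hostName):
HBaseProtos.ServerName sn = HBaseProtos.ServerName.newBuilder()
    .setHostName("rs1.example.com").build();
ZooKeeperProtos.RegionTransition a = ZooKeeperProtos.RegionTransition.newBuilder()
    .setEventTypeCode(3)
    .setRegionName(com.google.protobuf.ByteString.copyFromUtf8("r1"))
    .setCreateTime(1000L)
    .setServerName(sn)
    .build();
ZooKeeperProtos.RegionTransition b = a.toBuilder().build();
assert a.equals(b) && a.hashCode() == b.hashCode();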
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransitionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + eventTypeCode_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + regionName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + createTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + if (serverNameBuilder_ == null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + serverNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + payload_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public 
Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.eventTypeCode_ = eventTypeCode_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.regionName_ = regionName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.createTime_ = createTime_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (serverNameBuilder_ == null) { + result.serverName_ = serverName_; + } else { + result.serverName_ = serverNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.payload_ = payload_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.getDefaultInstance()) return this; + if (other.hasEventTypeCode()) { + setEventTypeCode(other.getEventTypeCode()); + } + if (other.hasRegionName()) { + setRegionName(other.getRegionName()); + } + if (other.hasCreateTime()) { + setCreateTime(other.getCreateTime()); + } + if (other.hasServerName()) { + mergeServerName(other.getServerName()); + } + if (other.hasPayload()) { + setPayload(other.getPayload()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasEventTypeCode()) { + + return false; + } + if (!hasRegionName()) { + + return false; + } + if (!hasCreateTime()) { + + return 
false; + } + if (!hasServerName()) { + + return false; + } + if (!getServerName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + eventTypeCode_ = input.readUInt32(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + regionName_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + createTime_ = input.readUInt64(); + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); + if (hasServerName()) { + subBuilder.mergeFrom(getServerName()); + } + input.readMessage(subBuilder, extensionRegistry); + setServerName(subBuilder.buildPartial()); + break; + } + case 42: { + bitField0_ |= 0x00000010; + payload_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required uint32 eventTypeCode = 1; + private int eventTypeCode_ ; + public boolean hasEventTypeCode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getEventTypeCode() { + return eventTypeCode_; + } + public Builder setEventTypeCode(int value) { + bitField0_ |= 0x00000001; + eventTypeCode_ = value; + onChanged(); + return this; + } + public Builder clearEventTypeCode() { + bitField0_ = (bitField0_ & ~0x00000001); + eventTypeCode_ = 0; + onChanged(); + return this; + } + + // required bytes regionName = 2; + private com.google.protobuf.ByteString regionName_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasRegionName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public com.google.protobuf.ByteString getRegionName() { + return regionName_; + } + public Builder setRegionName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + regionName_ = value; + onChanged(); + return this; + } + public Builder clearRegionName() { + bitField0_ = (bitField0_ & ~0x00000002); + regionName_ = getDefaultInstance().getRegionName(); + onChanged(); + return this; + } + + // required uint64 createTime = 3; + private long createTime_ ; + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getCreateTime() { + return createTime_; + } + public Builder setCreateTime(long value) { + bitField0_ |= 0x00000004; + createTime_ = value; + onChanged(); + return this; + } + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000004); + createTime_ = 0L; + onChanged(); + return this; + } + + // required .ServerName serverName = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + 
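// [Illustrative note, not part of this patch.] Each case label in the
// mergeFrom switch above is a protobuf wire tag: (fieldNumber << 3) | wireType,
// where wireType 0 is varint and 2 is length-delimited:
int tagEventTypeCode = (1 << 3) | 0;  //  8 -> uint32 eventTypeCode
int tagRegionName    = (2 << 3) | 2;  // 18 -> bytes regionName
int tagCreateTime    = (3 << 3) | 0;  // 24 -> uint64 createTime
int tagServerName    = (4 << 3) | 2;  // 34 -> embedded ServerName message
int tagPayload       = (5 << 3) | 2;  // 42 -> bytes payload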
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; + public boolean hasServerName() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + if (serverNameBuilder_ == null) { + return serverName_; + } else { + return serverNameBuilder_.getMessage(); + } + } + public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + serverName_ = value; + onChanged(); + } else { + serverNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + public Builder setServerName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + serverName_ = builderForValue.build(); + onChanged(); + } else { + serverNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + serverName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial(); + } else { + serverName_ = value; + } + onChanged(); + } else { + serverNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + public Builder clearServerName() { + if (serverNameBuilder_ == null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + serverNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getServerNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + if (serverNameBuilder_ != null) { + return serverNameBuilder_.getMessageOrBuilder(); + } else { + return serverName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerNameFieldBuilder() { + if (serverNameBuilder_ == null) { + serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + serverName_, + getParentForChildren(), + isClean()); + serverName_ = null; + } + return serverNameBuilder_; + } + + // optional bytes payload = 5; + private com.google.protobuf.ByteString payload_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasPayload() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public 
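// [Illustrative sketch, not part of this patch.] getServerNameBuilder() above
// swaps the plain serverName_ message for a live SingleFieldBuilder view, so
// the nested ServerName can be edited in place; it also flips the presence
// bit (setHostName is an assumed ServerName setter):
ZooKeeperProtos.RegionTransition.Builder rt =
    ZooKeeperProtos.RegionTransition.newBuilder();
rt.getServerNameBuilder().setHostName("rs1.example.com");
// rt.hasServerName() is now true without an explicit setServerName(...) call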
com.google.protobuf.ByteString getPayload() { + return payload_; + } + public Builder setPayload(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + payload_ = value; + onChanged(); + return this; + } + public Builder clearPayload() { + bitField0_ = (bitField0_ & ~0x00000010); + payload_ = getDefaultInstance().getPayload(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:RegionTransition) + } + + static { + defaultInstance = new RegionTransition(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegionTransition) + } + + public interface SplitLogTaskOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .SplitLogTask.State state = 1; + boolean hasState(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState(); + + // required .ServerName serverName = 2; + boolean hasServerName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); + } + public static final class SplitLogTask extends + com.google.protobuf.GeneratedMessage + implements SplitLogTaskOrBuilder { + // Use SplitLogTask.newBuilder() to construct. + private SplitLogTask(Builder builder) { + super(builder); + } + private SplitLogTask(boolean noInit) {} + + private static final SplitLogTask defaultInstance; + public static SplitLogTask getDefaultInstance() { + return defaultInstance; + } + + public SplitLogTask getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_SplitLogTask_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_SplitLogTask_fieldAccessorTable; + } + + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + UNASSIGNED(0, 0), + OWNED(1, 1), + RESIGNED(2, 2), + DONE(3, 3), + ERR(4, 4), + ; + + public static final int UNASSIGNED_VALUE = 0; + public static final int OWNED_VALUE = 1; + public static final int RESIGNED_VALUE = 2; + public static final int DONE_VALUE = 3; + public static final int ERR_VALUE = 4; + + + public final int getNumber() { return value; } + + public static State valueOf(int value) { + switch (value) { + case 0: return UNASSIGNED; + case 1: return OWNED; + case 2: return RESIGNED; + case 3: return DONE; + case 4: return ERR; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = { + UNASSIGNED, OWNED, RESIGNED, DONE, ERR, + }; + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private State(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:SplitLogTask.State) + } + + private int bitField0_; + // required .SplitLogTask.State state = 1; + public static final int STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State state_; + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState() { + return state_; + } + + // required .ServerName serverName = 2; + public static final int SERVERNAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_; + public boolean hasServerName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + return serverName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + return serverName_; + } + + private void initFields() { + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED; + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasServerName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServerName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, serverName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, serverName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if 
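// [Illustrative sketch, not part of this patch.] Each State constant carries
// both a descriptor index and the proto field number; only getNumber() (the
// proto number) ever reaches the wire, so Java declaration order could change
// without breaking serialized data:
int wireValue = ZooKeeperProtos.SplitLogTask.State.RESIGNED.getNumber(); // 2
ZooKeeperProtos.SplitLogTask.State st =
    ZooKeeperProtos.SplitLogTask.State.valueOf(wireValue);               // RESIGNED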
(obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask) obj; + + boolean result = true; + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && (hasServerName() == other.hasServerName()); + if (hasServerName()) { + result = result && getServerName() + .equals(other.getServerName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + if (hasServerName()) { + hash = (37 * hash) + SERVERNAME_FIELD_NUMBER; + hash = (53 * hash) + getServerName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + 
} + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTaskOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_SplitLogTask_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_SplitLogTask_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED; + bitField0_ = (bitField0_ & ~0x00000001); + if (serverNameBuilder_ == null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + serverNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask buildParsed() + throws 
com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.state_ = state_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (serverNameBuilder_ == null) { + result.serverName_ = serverName_; + } else { + result.serverName_ = serverNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); + } + if (other.hasServerName()) { + mergeServerName(other.getServerName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasState()) { + + return false; + } + if (!hasServerName()) { + + return false; + } + if (!getServerName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); + if (hasServerName()) { + subBuilder.mergeFrom(getServerName()); + } + input.readMessage(subBuilder, extensionRegistry); + setServerName(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .SplitLogTask.State state = 1; + private 
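// [Illustrative sketch, not part of this patch.] State.valueOf(int) returns
// null for numbers the enum does not know; the parser's case 8 above then
// preserves the raw varint in the unknown-field set instead of failing,
// keeping old readers forward-compatible with newly added states:
ZooKeeperProtos.SplitLogTask.State known =
    ZooKeeperProtos.SplitLogTask.State.valueOf(4);   // ERR
ZooKeeperProtos.SplitLogTask.State unknown =
    ZooKeeperProtos.SplitLogTask.State.valueOf(99);  // null -> kept as unknown field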
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED; + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState() { + return state_; + } + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + state_ = value; + onChanged(); + return this; + } + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED; + onChanged(); + return this; + } + + // required .ServerName serverName = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; + public boolean hasServerName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + if (serverNameBuilder_ == null) { + return serverName_; + } else { + return serverNameBuilder_.getMessage(); + } + } + public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + serverName_ = value; + onChanged(); + } else { + serverNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder setServerName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + serverName_ = builderForValue.build(); + onChanged(); + } else { + serverNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + serverName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial(); + } else { + serverName_ = value; + } + onChanged(); + } else { + serverNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearServerName() { + if (serverNameBuilder_ == null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + serverNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getServerNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder 
getServerNameOrBuilder() { + if (serverNameBuilder_ != null) { + return serverNameBuilder_.getMessageOrBuilder(); + } else { + return serverName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerNameFieldBuilder() { + if (serverNameBuilder_ == null) { + serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + serverName_, + getParentForChildren(), + isClean()); + serverName_ = null; + } + return serverNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:SplitLogTask) + } + + static { + defaultInstance = new SplitLogTask(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:SplitLogTask) + } + + public interface TableOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .Table.State state = 1 [default = ENABLED]; + boolean hasState(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState(); + } + public static final class Table extends + com.google.protobuf.GeneratedMessage + implements TableOrBuilder { + // Use Table.newBuilder() to construct. + private Table(Builder builder) { + super(builder); + } + private Table(boolean noInit) {} + + private static final Table defaultInstance; + public static Table getDefaultInstance() { + return defaultInstance; + } + + public Table getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_fieldAccessorTable; + } + + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + ENABLED(0, 0), + DISABLED(1, 1), + DISABLING(2, 2), + ENABLING(3, 3), + CREATINGTABLE(4, 4), + ; + + public static final int ENABLED_VALUE = 0; + public static final int DISABLED_VALUE = 1; + public static final int DISABLING_VALUE = 2; + public static final int ENABLING_VALUE = 3; + public static final int CREATINGTABLE_VALUE = 4; + + + public final int getNumber() { return value; } + + public static State valueOf(int value) { + switch (value) { + case 0: return ENABLED; + case 1: return DISABLED; + case 2: return DISABLING; + case 3: return ENABLING; + case 4: return CREATINGTABLE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final 
com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = { + ENABLED, DISABLED, DISABLING, ENABLING, CREATINGTABLE, + }; + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private State(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:Table.State) + } + + private int bitField0_; + // required .Table.State state = 1 [default = ENABLED]; + public static final int STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_; + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() { + return state_; + } + + private void initFields() { + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, state_.getNumber()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_.getNumber()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) obj; + + boolean result = true; + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + 
com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() 
{ + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.state_ = state_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasState()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } + break; + } + } + } + } + + private int bitField0_; + + // required .Table.State state = 1 [default = ENABLED]; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() { + return state_; + } + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + state_ = value; + onChanged(); + return this; + } + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:Table) + } + + static { + defaultInstance = new Table(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Table) + } + + public interface TableCreationStatusOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableCreationStatus.State state = 1 [default = CREATED_TD]; + boolean hasState(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State getState(); + } + public static final class TableCreationStatus extends + com.google.protobuf.GeneratedMessage + implements TableCreationStatusOrBuilder { + // Use TableCreationStatus.newBuilder() to construct. 
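Note: the Table message above now carries the CREATINGTABLE state added by this patch. A minimal round-trip sketch, using only the generated API shown here (the class name and main() wrapper are illustrative, not part of the patch):

    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table;

    public class TableStateRoundTrip {
      public static void main(String[] args) throws Exception {
        // Serialize a Table message carrying the new CREATINGTABLE state.
        byte[] bytes = Table.newBuilder()
            .setState(Table.State.CREATINGTABLE)
            .build()
            .toByteArray();
        // Parse it back; state is a required field, so parsing data
        // without it would fail isInitialized() and throw.
        Table parsed = Table.parseFrom(bytes);
        System.out.println(parsed.getState()); // CREATINGTABLE
      }
    }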
+    private TableCreationStatus(Builder builder) {
+      super(builder);
+    }
+    private TableCreationStatus(boolean noInit) {}
+
+    private static final TableCreationStatus defaultInstance;
+    public static TableCreationStatus getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public TableCreationStatus getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_TableCreationStatus_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_TableCreationStatus_fieldAccessorTable;
+    }
+
+    public enum State
+        implements com.google.protobuf.ProtocolMessageEnum {
+      CREATED_TD(0, 0),
+      CREATED_REGIONINFO(1, 1),
+      MOVED_TO_ORIG_LOCATION(2, 2),
+      ADD_TO_META(3, 3),
+      ASSIGN_USER_REGIONS(4, 4),
+      ;
+
+      public static final State DONE = ASSIGN_USER_REGIONS;
+      public static final int CREATED_TD_VALUE = 0;
+      public static final int CREATED_REGIONINFO_VALUE = 1;
+      public static final int MOVED_TO_ORIG_LOCATION_VALUE = 2;
+      public static final int ADD_TO_META_VALUE = 3;
+      public static final int ASSIGN_USER_REGIONS_VALUE = 4;
+      public static final int DONE_VALUE = 4;
+
+
+      public final int getNumber() { return value; }
+
+      public static State valueOf(int value) {
+        switch (value) {
+          case 0: return CREATED_TD;
+          case 1: return CREATED_REGIONINFO;
+          case 2: return MOVED_TO_ORIG_LOCATION;
+          case 3: return ADD_TO_META;
+          case 4: return ASSIGN_USER_REGIONS;
+          default: return null;
+        }
+      }
+
+      public static com.google.protobuf.Internal.EnumLiteMap<State>
+          internalGetValueMap() {
+        return internalValueMap;
+      }
+      private static com.google.protobuf.Internal.EnumLiteMap<State>
+          internalValueMap =
+            new com.google.protobuf.Internal.EnumLiteMap<State>() {
+              public State findValueByNumber(int number) {
+                return State.valueOf(number);
+              }
+            };
+
+      public final com.google.protobuf.Descriptors.EnumValueDescriptor
+          getValueDescriptor() {
+        return getDescriptor().getValues().get(index);
+      }
+      public final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptorForType() {
+        return getDescriptor();
+      }
+      public static final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.getDescriptor().getEnumTypes().get(0);
+      }
+
+      private static final State[] VALUES = {
+        CREATED_TD, CREATED_REGIONINFO, MOVED_TO_ORIG_LOCATION, ADD_TO_META, ASSIGN_USER_REGIONS, DONE,
+      };
+
+      public static State valueOf(
+          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+        if (desc.getType() != getDescriptor()) {
+          throw new java.lang.IllegalArgumentException(
+            "EnumValueDescriptor is not for this type.");
+        }
+        return VALUES[desc.getIndex()];
+      }
+
+      private final int index;
+      private final int value;
+
+      private State(int index, int value) {
+        this.index = index;
+        this.value = value;
+      }
+
+      // @@protoc_insertion_point(enum_scope:TableCreationStatus.State)
+    }
+
+    private int bitField0_;
+    // required .TableCreationStatus.State state = 1 [default = CREATED_TD];
+    public static final int STATE_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State state_;
+    public boolean hasState() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State getState() { + return state_; + } + + private void initFields() { + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State.CREATED_TD; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, state_.getNumber()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_.getNumber()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus) obj; + + boolean result = true; + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatusOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_TableCreationStatus_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_TableCreationStatus_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public 
Builder clear() { + super.clear(); + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State.CREATED_TD; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.state_ = state_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasState()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } + break; + } + } + } + } + + private int bitField0_; + + // required .TableCreationStatus.State state = 1 [default = CREATED_TD]; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State.CREATED_TD; + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State getState() { + return state_; + } + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + state_ = value; + onChanged(); + return this; + } + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State.CREATED_TD; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:TableCreationStatus) + } + + static { + defaultInstance = new TableCreationStatus(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:TableCreationStatus) + } + + public interface ReplicationPeerOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string clusterkey = 1; + boolean hasClusterkey(); + String getClusterkey(); + } + public static final class ReplicationPeer extends + com.google.protobuf.GeneratedMessage + implements ReplicationPeerOrBuilder { + // Use ReplicationPeer.newBuilder() to construct. 
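Note: TableCreationStatus above declares DONE as an alias of ASSIGN_USER_REGIONS (both share wire value 4), so callers can test for completion without naming the final step. A small sketch of the alias semantics (the class name and main() wrapper are illustrative, not part of the patch):

    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus;

    public class CreationStatusAliasDemo {
      public static void main(String[] args) throws Exception {
        byte[] bytes = TableCreationStatus.newBuilder()
            .setState(TableCreationStatus.State.ASSIGN_USER_REGIONS)
            .build()
            .toByteArray();
        // DONE and ASSIGN_USER_REGIONS are the same enum constant, so
        // the parsed state compares equal under either name.
        TableCreationStatus parsed = TableCreationStatus.parseFrom(bytes);
        System.out.println(parsed.getState() == TableCreationStatus.State.DONE); // true
      }
    }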
+ private ReplicationPeer(Builder builder) { + super(builder); + } + private ReplicationPeer(boolean noInit) {} + + private static final ReplicationPeer defaultInstance; + public static ReplicationPeer getDefaultInstance() { + return defaultInstance; + } + + public ReplicationPeer getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationPeer_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationPeer_fieldAccessorTable; + } + + private int bitField0_; + // required string clusterkey = 1; + public static final int CLUSTERKEY_FIELD_NUMBER = 1; + private java.lang.Object clusterkey_; + public boolean hasClusterkey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getClusterkey() { + java.lang.Object ref = clusterkey_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + clusterkey_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getClusterkeyBytes() { + java.lang.Object ref = clusterkey_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + clusterkey_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + clusterkey_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasClusterkey()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getClusterkeyBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getClusterkeyBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer) obj; + + boolean result = true; + result = result && (hasClusterkey() == other.hasClusterkey()); + if (hasClusterkey()) { + result = result && getClusterkey() + .equals(other.getClusterkey()); + } + 
result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasClusterkey()) { + hash = (37 * hash) + CLUSTERKEY_FIELD_NUMBER; + hash = (53 * hash) + getClusterkey().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationPeer_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationPeer_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + clusterkey_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.clusterkey_ = clusterkey_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer) { + return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance()) return this; + if (other.hasClusterkey()) { + setClusterkey(other.getClusterkey()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasClusterkey()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + clusterkey_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required string clusterkey = 1; + private java.lang.Object clusterkey_ = ""; + public boolean hasClusterkey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getClusterkey() { + java.lang.Object ref = clusterkey_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + clusterkey_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setClusterkey(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + clusterkey_ = value; + onChanged(); + return this; + } + public Builder clearClusterkey() { + bitField0_ = (bitField0_ & ~0x00000001); + clusterkey_ = getDefaultInstance().getClusterkey(); + onChanged(); + return this; + } + void setClusterkey(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + clusterkey_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:ReplicationPeer) + } + + static { + defaultInstance = new ReplicationPeer(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ReplicationPeer) + } + + public interface ReplicationStateOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .ReplicationState.State state = 1; + boolean hasState(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State getState(); + } + public static final class ReplicationState extends + com.google.protobuf.GeneratedMessage + implements ReplicationStateOrBuilder { + // Use ReplicationState.newBuilder() to construct. 
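Note: ReplicationPeer above carries a single required clusterkey string, so build() fails fast when it is unset. A short sketch (the class name, main() wrapper, and cluster key value are made-up examples):

    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer;

    public class ReplicationPeerDemo {
      public static void main(String[] args) throws Exception {
        ReplicationPeer peer = ReplicationPeer.newBuilder()
            .setClusterkey("zk1,zk2,zk3:2181:/hbase") // hypothetical quorum:port:znode key
            .build();
        byte[] bytes = peer.toByteArray();
        System.out.println(ReplicationPeer.parseFrom(bytes).getClusterkey());
        // Calling build() without setClusterkey(...) would throw
        // UninitializedMessageException, because the field is required.
      }
    }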
+ private ReplicationState(Builder builder) { + super(builder); + } + private ReplicationState(boolean noInit) {} + + private static final ReplicationState defaultInstance; + public static ReplicationState getDefaultInstance() { + return defaultInstance; + } + + public ReplicationState getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationState_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationState_fieldAccessorTable; + } + + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + ENABLED(0, 0), + DISABLED(1, 1), + ; + + public static final int ENABLED_VALUE = 0; + public static final int DISABLED_VALUE = 1; + + + public final int getNumber() { return value; } + + public static State valueOf(int value) { + switch (value) { + case 0: return ENABLED; + case 1: return DISABLED; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = { + ENABLED, DISABLED, + }; + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private State(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:ReplicationState.State) + } + + private int bitField0_; + // required .ReplicationState.State state = 1; + public static final int STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State state_; + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State getState() { + return state_; + } + + private void initFields() { + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { 
+ getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, state_.getNumber()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_.getNumber()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState) obj; + + boolean result = true; + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState 
parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationStateOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationState_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationState_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDefaultInstance(); + } + + public 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.state_ = state_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasState()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } + break; + } + } + } + } + + private int bitField0_; + + // required .ReplicationState.State state = 1; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED; + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State getState() { + 
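+ // Reflects the value staged in this builder; until setState(...) flips
+ // bitField0_, this is the ENABLED default assigned in clear().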
return state_; + } + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + state_ = value; + onChanged(); + return this; + } + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ReplicationState) + } + + static { + defaultInstance = new ReplicationState(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ReplicationState) + } + + public interface ReplicationHLogPositionOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required int64 position = 1; + boolean hasPosition(); + long getPosition(); + } + public static final class ReplicationHLogPosition extends + com.google.protobuf.GeneratedMessage + implements ReplicationHLogPositionOrBuilder { + // Use ReplicationHLogPosition.newBuilder() to construct. + private ReplicationHLogPosition(Builder builder) { + super(builder); + } + private ReplicationHLogPosition(boolean noInit) {} + + private static final ReplicationHLogPosition defaultInstance; + public static ReplicationHLogPosition getDefaultInstance() { + return defaultInstance; + } + + public ReplicationHLogPosition getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationHLogPosition_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationHLogPosition_fieldAccessorTable; + } + + private int bitField0_; + // required int64 position = 1; + public static final int POSITION_FIELD_NUMBER = 1; + private long position_; + public boolean hasPosition() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getPosition() { + return position_; + } + + private void initFields() { + position_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasPosition()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, position_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, position_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return 
true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition) obj; + + boolean result = true; + result = result && (hasPosition() == other.hasPosition()); + if (hasPosition()) { + result = result && (getPosition() + == other.getPosition()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasPosition()) { + hash = (37 * hash) + POSITION_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getPosition()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom( + com.google.protobuf.CodedInputStream input) + throws 
java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPositionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationHLogPosition_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationHLogPosition_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + position_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.position_ = position_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.getDefaultInstance()) return this; + if (other.hasPosition()) { + setPosition(other.getPosition()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasPosition()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + position_ = input.readInt64(); + break; + } + } + } + } + + private int bitField0_; + + // required int64 position = 1; + private long position_ ; + public boolean hasPosition() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getPosition() { + return position_; + } + public Builder setPosition(long value) { + bitField0_ |= 0x00000001; + position_ = value; + onChanged(); + return this; + } + public Builder clearPosition() { + bitField0_ = (bitField0_ & ~0x00000001); + position_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ReplicationHLogPosition) + } + + static { + defaultInstance = new ReplicationHLogPosition(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ReplicationHLogPosition) + } + + public interface ReplicationLockOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string lockOwner = 1; + boolean hasLockOwner(); + String getLockOwner(); + } + public static final class ReplicationLock extends + com.google.protobuf.GeneratedMessage + implements ReplicationLockOrBuilder { + // Use ReplicationLock.newBuilder() to construct. 
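+ // Construction sketch (assumes only the generated API above; not part of
+ // the generated source): lockOwner is this message's sole, required field,
+ // so build() fails with UninitializedMessageException until it is set.
+ //   ReplicationLock lock = ReplicationLock.newBuilder()
+ //       .setLockOwner("rs1.example.com,60020,1362000000000").build();
+ //   String owner =
+ //       ReplicationLock.parseFrom(lock.toByteArray()).getLockOwner();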
+ private ReplicationLock(Builder builder) { + super(builder); + } + private ReplicationLock(boolean noInit) {} + + private static final ReplicationLock defaultInstance; + public static ReplicationLock getDefaultInstance() { + return defaultInstance; + } + + public ReplicationLock getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationLock_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationLock_fieldAccessorTable; + } + + private int bitField0_; + // required string lockOwner = 1; + public static final int LOCKOWNER_FIELD_NUMBER = 1; + private java.lang.Object lockOwner_; + public boolean hasLockOwner() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getLockOwner() { + java.lang.Object ref = lockOwner_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + lockOwner_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getLockOwnerBytes() { + java.lang.Object ref = lockOwner_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + lockOwner_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + lockOwner_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasLockOwner()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getLockOwnerBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getLockOwnerBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock) obj; + + boolean result = true; + result = result && (hasLockOwner() == other.hasLockOwner()); + if (hasLockOwner()) { + result = result && getLockOwner() + .equals(other.getLockOwner()); + } + result = result && + 
getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasLockOwner()) { + hash = (37 * hash) + LOCKOWNER_FIELD_NUMBER; + hash = (53 * hash) + getLockOwner().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLockOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationLock_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_ReplicationLock_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + lockOwner_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.lockOwner_ = lockOwner_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock) { + return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.getDefaultInstance()) return this; + if (other.hasLockOwner()) { + setLockOwner(other.getLockOwner()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasLockOwner()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + lockOwner_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required string lockOwner = 1; + private java.lang.Object lockOwner_ = ""; + public boolean hasLockOwner() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getLockOwner() { + java.lang.Object ref = lockOwner_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + lockOwner_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setLockOwner(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + lockOwner_ = value; + onChanged(); + return this; + } + public Builder clearLockOwner() { + bitField0_ = (bitField0_ & ~0x00000001); + lockOwner_ = getDefaultInstance().getLockOwner(); + onChanged(); + return this; + } + void setLockOwner(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + lockOwner_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:ReplicationLock) + } + + static { + defaultInstance = new ReplicationLock(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ReplicationLock) + } + + public interface TableLockOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional bytes tableName = 1; + boolean hasTableName(); + com.google.protobuf.ByteString getTableName(); + + // optional .ServerName lockOwner = 2; + boolean hasLockOwner(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder(); + + // optional int64 threadId = 3; + boolean hasThreadId(); + long getThreadId(); + + // optional bool isShared = 4; + boolean hasIsShared(); + boolean getIsShared(); + + // optional string purpose = 5; + boolean hasPurpose(); + String getPurpose(); + } + public static final class TableLock extends + com.google.protobuf.GeneratedMessage + implements TableLockOrBuilder { + // Use TableLock.newBuilder() to construct. 
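+ // Construction sketch for TableLock (assumption, not generated code): every
+ // field is optional, so an empty TableLock is already initialized; only a
+ // lockOwner that is present must be a fully-built ServerName.
+ //   TableLock tl = TableLock.newBuilder()
+ //       .setTableName(com.google.protobuf.ByteString.copyFromUtf8("t1"))
+ //       .setThreadId(Thread.currentThread().getId())
+ //       .setIsShared(false)
+ //       .setPurpose("schema change")  // free-form diagnostic string
+ //       .build();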
+ private TableLock(Builder builder) { + super(builder); + } + private TableLock(boolean noInit) {} + + private static final TableLock defaultInstance; + public static TableLock getDefaultInstance() { + return defaultInstance; + } + + public TableLock getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_TableLock_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_TableLock_fieldAccessorTable; + } + + private int bitField0_; + // optional bytes tableName = 1; + public static final int TABLENAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString tableName_; + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + + // optional .ServerName lockOwner = 2; + public static final int LOCKOWNER_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_; + public boolean hasLockOwner() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner() { + return lockOwner_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder() { + return lockOwner_; + } + + // optional int64 threadId = 3; + public static final int THREADID_FIELD_NUMBER = 3; + private long threadId_; + public boolean hasThreadId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getThreadId() { + return threadId_; + } + + // optional bool isShared = 4; + public static final int ISSHARED_FIELD_NUMBER = 4; + private boolean isShared_; + public boolean hasIsShared() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public boolean getIsShared() { + return isShared_; + } + + // optional string purpose = 5; + public static final int PURPOSE_FIELD_NUMBER = 5; + private java.lang.Object purpose_; + public boolean hasPurpose() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public String getPurpose() { + java.lang.Object ref = purpose_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + purpose_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getPurposeBytes() { + java.lang.Object ref = purpose_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + purpose_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + tableName_ = com.google.protobuf.ByteString.EMPTY; + lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + threadId_ = 0L; + isShared_ = false; + purpose_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasLockOwner()) { + if (!getLockOwner().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + 
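+ // Only reachable once the optional lockOwner (if any) passed its own
+ // isInitialized() check above; ServerName declares required fields.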
memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, tableName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, lockOwner_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, threadId_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBool(4, isShared_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, getPurposeBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, tableName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, lockOwner_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, threadId_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(4, isShared_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getPurposeBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasLockOwner() == other.hasLockOwner()); + if (hasLockOwner()) { + result = result && getLockOwner() + .equals(other.getLockOwner()); + } + result = result && (hasThreadId() == other.hasThreadId()); + if (hasThreadId()) { + result = result && (getThreadId() + == other.getThreadId()); + } + result = result && (hasIsShared() == other.hasIsShared()); + if (hasIsShared()) { + result = result && (getIsShared() + == other.getIsShared()); + } + result = result && (hasPurpose() == other.hasPurpose()); + if (hasPurpose()) { + result = result && getPurpose() + .equals(other.getPurpose()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLENAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasLockOwner()) { + hash = (37 * hash) + LOCKOWNER_FIELD_NUMBER; + hash = (53 * hash) + getLockOwner().hashCode(); + } + if (hasThreadId()) { + hash = (37 * hash) + 
THREADID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getThreadId()); + } + if (hasIsShared()) { + hash = (37 * hash) + ISSHARED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsShared()); + } + if (hasPurpose()) { + hash = (37 * hash) + PURPOSE_FIELD_NUMBER; + hash = (53 * hash) + getPurpose().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock prototype) { + return 
newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLockOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_TableLock_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_TableLock_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getLockOwnerFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + tableName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + if (lockOwnerBuilder_ == null) { + lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + lockOwnerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + threadId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + isShared_ = false; + bitField0_ = (bitField0_ & ~0x00000008); + purpose_ = ""; + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.tableName_ = 
tableName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (lockOwnerBuilder_ == null) { + result.lockOwner_ = lockOwner_; + } else { + result.lockOwner_ = lockOwnerBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.threadId_ = threadId_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.isShared_ = isShared_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.purpose_ = purpose_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance()) return this; + if (other.hasTableName()) { + setTableName(other.getTableName()); + } + if (other.hasLockOwner()) { + mergeLockOwner(other.getLockOwner()); + } + if (other.hasThreadId()) { + setThreadId(other.getThreadId()); + } + if (other.hasIsShared()) { + setIsShared(other.getIsShared()); + } + if (other.hasPurpose()) { + setPurpose(other.getPurpose()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasLockOwner()) { + if (!getLockOwner().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + tableName_ = input.readBytes(); + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); + if (hasLockOwner()) { + subBuilder.mergeFrom(getLockOwner()); + } + input.readMessage(subBuilder, extensionRegistry); + setLockOwner(subBuilder.buildPartial()); + break; + } + case 24: { + bitField0_ |= 0x00000004; + threadId_ = input.readInt64(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + isShared_ = input.readBool(); + break; + } + case 42: { + bitField0_ |= 0x00000010; + purpose_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // optional bytes tableName = 1; + private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + public Builder setTableName(com.google.protobuf.ByteString value) { 
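+ // Generated proto2 setters never accept null; use clearTableName() to
+ // return the field to its ByteString.EMPTY default.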
+ if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + tableName_ = value; + onChanged(); + return this; + } + public Builder clearTableName() { + bitField0_ = (bitField0_ & ~0x00000001); + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + + // optional .ServerName lockOwner = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> lockOwnerBuilder_; + public boolean hasLockOwner() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner() { + if (lockOwnerBuilder_ == null) { + return lockOwner_; + } else { + return lockOwnerBuilder_.getMessage(); + } + } + public Builder setLockOwner(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (lockOwnerBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + lockOwner_ = value; + onChanged(); + } else { + lockOwnerBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder setLockOwner( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (lockOwnerBuilder_ == null) { + lockOwner_ = builderForValue.build(); + onChanged(); + } else { + lockOwnerBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder mergeLockOwner(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (lockOwnerBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + lockOwner_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + lockOwner_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(lockOwner_).mergeFrom(value).buildPartial(); + } else { + lockOwner_ = value; + } + onChanged(); + } else { + lockOwnerBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearLockOwner() { + if (lockOwnerBuilder_ == null) { + lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + lockOwnerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getLockOwnerBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getLockOwnerFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder() { + if (lockOwnerBuilder_ != null) { + return lockOwnerBuilder_.getMessageOrBuilder(); + } else { + return lockOwner_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getLockOwnerFieldBuilder() { + if (lockOwnerBuilder_ == null) { + lockOwnerBuilder_ = new com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + lockOwner_, + getParentForChildren(), + isClean()); + lockOwner_ = null; + } + return lockOwnerBuilder_; + } + + // optional int64 threadId = 3; + private long threadId_ ; + public boolean hasThreadId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getThreadId() { + return threadId_; + } + public Builder setThreadId(long value) { + bitField0_ |= 0x00000004; + threadId_ = value; + onChanged(); + return this; + } + public Builder clearThreadId() { + bitField0_ = (bitField0_ & ~0x00000004); + threadId_ = 0L; + onChanged(); + return this; + } + + // optional bool isShared = 4; + private boolean isShared_ ; + public boolean hasIsShared() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public boolean getIsShared() { + return isShared_; + } + public Builder setIsShared(boolean value) { + bitField0_ |= 0x00000008; + isShared_ = value; + onChanged(); + return this; + } + public Builder clearIsShared() { + bitField0_ = (bitField0_ & ~0x00000008); + isShared_ = false; + onChanged(); + return this; + } + + // optional string purpose = 5; + private java.lang.Object purpose_ = ""; + public boolean hasPurpose() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public String getPurpose() { + java.lang.Object ref = purpose_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + purpose_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setPurpose(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + purpose_ = value; + onChanged(); + return this; + } + public Builder clearPurpose() { + bitField0_ = (bitField0_ & ~0x00000010); + purpose_ = getDefaultInstance().getPurpose(); + onChanged(); + return this; + } + void setPurpose(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000010; + purpose_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:TableLock) + } + + static { + defaultInstance = new TableLock(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:TableLock) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RootRegionServer_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RootRegionServer_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_Master_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Master_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ClusterUp_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ClusterUp_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegionTransition_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegionTransition_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_SplitLogTask_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_SplitLogTask_fieldAccessorTable; + private static 
com.google.protobuf.Descriptors.Descriptor + internal_static_Table_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Table_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_TableCreationStatus_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_TableCreationStatus_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ReplicationPeer_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ReplicationPeer_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ReplicationState_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ReplicationState_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ReplicationHLogPosition_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ReplicationHLogPosition_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ReplicationLock_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ReplicationLock_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_TableLock_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_TableLock_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\017ZooKeeper.proto\032\013hbase.proto\"/\n\020RootRe" + + "gionServer\022\033\n\006server\030\001 \002(\0132\013.ServerName\"" + + "%\n\006Master\022\033\n\006master\030\001 \002(\0132\013.ServerName\"\036" + + "\n\tClusterUp\022\021\n\tstartDate\030\001 \002(\t\"\203\001\n\020Regio" + + "nTransition\022\025\n\reventTypeCode\030\001 \002(\r\022\022\n\nre" + + "gionName\030\002 \002(\014\022\022\n\ncreateTime\030\003 \002(\004\022\037\n\nse" + + "rverName\030\004 \002(\0132\013.ServerName\022\017\n\007payload\030\005" + + " \001(\014\"\230\001\n\014SplitLogTask\022\"\n\005state\030\001 \002(\0162\023.S" + + "plitLogTask.State\022\037\n\nserverName\030\002 \002(\0132\013." 
+ + "ServerName\"C\n\005State\022\016\n\nUNASSIGNED\020\000\022\t\n\005O", + "WNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\"" + + "\201\001\n\005Table\022$\n\005state\030\001 \002(\0162\014.Table.State:\007" + + "ENABLED\"R\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLE" + + "D\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\022\021\n\rCREA" + + "TINGTABLE\020\004\"\315\001\n\023TableCreationStatus\0225\n\005s" + + "tate\030\001 \002(\0162\032.TableCreationStatus.State:\n" + + "CREATED_TD\"\177\n\005State\022\016\n\nCREATED_TD\020\000\022\026\n\022C" + + "REATED_REGIONINFO\020\001\022\032\n\026MOVED_TO_ORIG_LOC" + + "ATION\020\002\022\017\n\013ADD_TO_META\020\003\022\027\n\023ASSIGN_USER_" + + "REGIONS\020\004\022\010\n\004DONE\020\004\"%\n\017ReplicationPeer\022\022", + "\n\nclusterkey\030\001 \002(\t\"^\n\020ReplicationState\022&" + + "\n\005state\030\001 \002(\0162\027.ReplicationState.State\"\"" + + "\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027Re" + + "plicationHLogPosition\022\020\n\010position\030\001 \002(\003\"" + + "$\n\017ReplicationLock\022\021\n\tlockOwner\030\001 \002(\t\"s\n" + + "\tTableLock\022\021\n\ttableName\030\001 \001(\014\022\036\n\tlockOwn" + + "er\030\002 \001(\0132\013.ServerName\022\020\n\010threadId\030\003 \001(\003\022" + + "\020\n\010isShared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\tBE\n*or" + + "g.apache.hadoop.hbase.protobuf.generated" + + "B\017ZooKeeperProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_RootRegionServer_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_RootRegionServer_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RootRegionServer_descriptor, + new java.lang.String[] { "Server", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.class, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.Builder.class); + internal_static_Master_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_Master_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Master_descriptor, + new java.lang.String[] { "Master", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.class, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.Builder.class); + internal_static_ClusterUp_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_ClusterUp_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ClusterUp_descriptor, + new java.lang.String[] { "StartDate", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.class, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder.class); + internal_static_RegionTransition_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_RegionTransition_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegionTransition_descriptor, + new java.lang.String[] { "EventTypeCode", 
"RegionName", "CreateTime", "ServerName", "Payload", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.class, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder.class); + internal_static_SplitLogTask_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_SplitLogTask_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SplitLogTask_descriptor, + new java.lang.String[] { "State", "ServerName", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.class, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.Builder.class); + internal_static_Table_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_Table_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Table_descriptor, + new java.lang.String[] { "State", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class); + internal_static_TableCreationStatus_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_TableCreationStatus_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TableCreationStatus_descriptor, + new java.lang.String[] { "State", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.class, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.Builder.class); + internal_static_ReplicationPeer_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_ReplicationPeer_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ReplicationPeer_descriptor, + new java.lang.String[] { "Clusterkey", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.class, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder.class); + internal_static_ReplicationState_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_ReplicationState_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ReplicationState_descriptor, + new java.lang.String[] { "State", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.class, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.Builder.class); + internal_static_ReplicationHLogPosition_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_ReplicationHLogPosition_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ReplicationHLogPosition_descriptor, + new java.lang.String[] { "Position", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.class, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.Builder.class); + internal_static_ReplicationLock_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_ReplicationLock_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ReplicationLock_descriptor, + new java.lang.String[] { "LockOwner", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.class, + 
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.Builder.class);
+          internal_static_TableLock_descriptor =
+            getDescriptor().getMessageTypes().get(11);
+          internal_static_TableLock_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_TableLock_descriptor,
+              new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", },
+              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.class,
+              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder.class);
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
+        }, assigner);
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
Index: hbase-protocol/src/main/protobuf/ZooKeeper.proto
===================================================================
--- hbase-protocol/src/main/protobuf/ZooKeeper.proto	(revision 1459141)
+++ hbase-protocol/src/main/protobuf/ZooKeeper.proto	(working copy)
@@ -93,12 +93,27 @@
     DISABLED = 1;
     DISABLING = 2;
     ENABLING = 3;
+    CREATINGTABLE = 4;
   }
   // This is the table's state.  If no znode for a table,
   // its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
   // for more.
   required State state = 1 [default = ENABLED];
 }
+/**
+ * The znode that holds the current state of the table creation
+ */
+message TableCreationStatus {
+  enum State {
+    CREATED_TD = 0;
+    CREATED_REGIONINFO = 1;
+    MOVED_TO_ORIG_LOCATION = 2;
+    ADD_TO_META = 3;
+    ASSIGN_USER_REGIONS = 4;
+    DONE = 5;
+  }
+  required State state = 1 [default = CREATED_TD];
+}
 
 /**
  * Used by replication. Holds a replication peer key.
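For reference, the status znode introduced above carries nothing but a TableCreationStatus message framed with HBase's PB magic prefix. A minimal stand-alone sketch of the round trip that setStatusOfTableCreationNode() and getCurrentTableCreationState() perform in ZKTable (same ProtobufUtil calls as in the patch, but with no ZooKeeper involved; the class name here is illustrative):

    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus;

    public class TableCreationStatusRoundTrip {
      public static void main(String[] args) throws Exception {
        // Serialize, prepending the magic prefix exactly as the write path does
        TableCreationStatus status = TableCreationStatus.newBuilder()
            .setState(TableCreationStatus.State.CREATED_REGIONINFO).build();
        byte[] data = ProtobufUtil.prependPBMagic(status.toByteArray());

        // Deserialize, skipping the magic prefix exactly as the read path does
        int magicLen = ProtobufUtil.lengthOfPBMagic();
        TableCreationStatus parsed = TableCreationStatus.newBuilder()
            .mergeFrom(data, magicLen, data.length - magicLen).build();
        System.out.println(parsed.getState()); // CREATED_REGIONINFO
      }
    }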
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java	(revision 1459141)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java	(working copy)
@@ -27,6 +27,7 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.NavigableMap;
 import java.util.Set;
 import java.util.TreeMap;
@@ -53,7 +54,9 @@
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.exceptions.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.exceptions.TableExistsException;
 import org.apache.hadoop.hbase.exceptions.TableNotFoundException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaReader;
@@ -62,6 +65,7 @@
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler;
+import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
 import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
 import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
 import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
@@ -368,7 +372,9 @@
     // Scan META to build list of existing regions, servers, and assignment
     // Returns servers who have not checked in (assumed dead) and their regions
-    Map<ServerName, List<HRegionInfo>> deadServers = rebuildUserRegions();
+    Pair<Map<ServerName, List<HRegionInfo>>, Map<String, Map<HRegionInfo, Boolean>>> recoveryInfo
+        = rebuildUserRegions();
+    Map<ServerName, List<HRegionInfo>> deadServers = recoveryInfo.getFirst();
 
     // This method will assign all user regions if a clean server startup or
     // it will reconstruct master state and cleanup any leftovers from
@@ -377,6 +383,8 @@
     recoverTableInDisablingState();
     recoverTableInEnablingState();
+
+    recoverTableInCreation(recoveryInfo.getSecond());
   }
 
   /**
@@ -2321,7 +2329,9 @@
    * in META
    * @throws IOException
    */
-  Map<ServerName, List<HRegionInfo>> rebuildUserRegions() throws IOException, KeeperException {
+  Pair<Map<ServerName, List<HRegionInfo>>, Map<String, Map<HRegionInfo, Boolean>>> rebuildUserRegions()
+      throws IOException, KeeperException {
+    Set<String> creatingTables = ZKTable.getCreatingTables(watcher);
     Set<String> enablingTables = ZKTable.getEnablingTables(watcher);
     Set<String> disabledOrEnablingTables = ZKTable.getDisabledTables(watcher);
     disabledOrEnablingTables.addAll(enablingTables);
@@ -2332,9 +2342,12 @@
     List<Result> results = MetaReader.fullScan(this.catalogTracker);
     // Get any new but slow to checkin region server that joined the cluster
    Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
+    Pair<Map<ServerName, List<HRegionInfo>>, Map<String, Map<HRegionInfo, Boolean>>> pair =
+        new Pair<Map<ServerName, List<HRegionInfo>>, Map<String, Map<HRegionInfo, Boolean>>>();
     // Map of offline servers and their regions to be returned
     Map<ServerName, List<HRegionInfo>> offlineServers = new TreeMap<ServerName, List<HRegionInfo>>();
+    Map<String, Map<HRegionInfo, Boolean>> hriOfCreatingTables = new HashMap<String, Map<HRegionInfo, Boolean>>();
     // Iterate regions in META
     for (Result result : results) {
       Pair<HRegionInfo, ServerName> region = HRegionInfo.getHRegionInfoAndServerName(result);
@@ -2360,6 +2373,14 @@
             " has null regionLocation." +
+ " But its table " + tableName + " isn't in ENABLING state."); } + // If the table is in CREATING_TABLE state and if the regionLocation is null + // it means they are not yet assigned while table creation + + // Collect the hris for the regions of the table that is under creation + if (creatingTables.contains(tableName)) { + populateCreatingTableHRIs(hriOfCreatingTables, regionInfo, tableName, true); + } + } else if (!onlineServers.contains(regionLocation)) { // Region is located on a server that isn't online List offlineRegions = offlineServers.get(regionLocation); @@ -2368,11 +2389,14 @@ offlineServers.put(regionLocation, offlineRegions); } offlineRegions.add(regionInfo); + populateCreatingTableHRIs(hriOfCreatingTables, regionInfo, tableName, false); // need to enable the table if not disabled or disabling or enabling // this will be used in rolling restarts - if (!disabledOrDisablingOrEnabling.contains(tableName) - && !getZKTable().isEnabledTable(tableName)) { - setEnabledTable(tableName); + if (!creatingTables.contains(tableName)) { + if (!disabledOrDisablingOrEnabling.contains(tableName) + && !getZKTable().isEnabledTable(tableName)) { + setEnabledTable(tableName); + } } } else { // If region is in offline and split state check the ZKNode @@ -2390,20 +2414,39 @@ } // Region is being served and on an active server // add only if region not in disabled or enabling table + // Even if in CreatingTable state we can make the region as online + // Ensure we take care while recovery if (!disabledOrEnablingTables.contains(tableName)) { regionStates.regionOnline(regionInfo, regionLocation); + populateCreatingTableHRIs(hriOfCreatingTables, regionInfo, tableName, false); } // need to enable the table if not disabled or disabling or enabling // this will be used in rolling restarts - if (!disabledOrDisablingOrEnabling.contains(tableName) - && !getZKTable().isEnabledTable(tableName)) { - setEnabledTable(tableName); + if (!creatingTables.contains(tableName)) { + if (!disabledOrDisablingOrEnabling.contains(tableName) + && !getZKTable().isEnabledTable(tableName)) { + setEnabledTable(tableName); + } } } } - return offlineServers; + pair.setFirst(offlineServers); + pair.setSecond(hriOfCreatingTables); + return pair; } + private void populateCreatingTableHRIs( + Map> hriOfCreatingTables, HRegionInfo regionInfo, + String tableName, boolean process) { + if (hriOfCreatingTables.get(tableName) == null) { + Map infoDetails = new HashMap(); + infoDetails.put(regionInfo, process); + hriOfCreatingTables.put(tableName, infoDetails); + } else { + hriOfCreatingTables.get(tableName).put(regionInfo, process); + } + } + /** * Recover the tables that were not fully moved to DISABLED state. These * tables are in DISABLING state when the master restarted/switched. 
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java	(revision 1459141)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java	(working copy)
@@ -20,6 +20,8 @@
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -45,10 +47,16 @@
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.hadoop.hbase.zookeeper.ZKTable;
 import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
 
+import com.google.common.collect.Lists;
+
 /**
  * Handler to create a table.
 */
@@ -63,10 +71,12 @@
   private final TableLockManager tableLockManager;
   private final HRegionInfo [] newRegions;
   private final TableLock tableLock;
+  private final boolean bypass;
+  private final String tableName;
 
   public CreateTableHandler(Server server, MasterFileSystem fileSystemManager,
     HTableDescriptor hTableDescriptor, Configuration conf, HRegionInfo [] newRegions,
-    MasterServices masterServices) {
+    MasterServices masterServices, boolean bypass, String tableName) {
     super(server, EventType.C_M_CREATE_TABLE);
 
     this.fileSystemManager = fileSystemManager;
@@ -76,8 +86,12 @@
     this.catalogTracker = masterServices.getCatalogTracker();
     this.assignmentManager = masterServices.getAssignmentManager();
     this.tableLockManager = masterServices.getTableLockManager();
+    this.bypass = bypass;
+    this.tableName = (this.hTableDescriptor == null) ? tableName : this.hTableDescriptor
+        .getNameAsString();
 
-    this.tableLock = this.tableLockManager.writeLock(this.hTableDescriptor.getName()
+    byte[] name = ((this.hTableDescriptor == null) ? Bytes.toBytes(tableName) : this.hTableDescriptor.getName());
+    this.tableLock = this.tableLockManager.writeLock(name
         , EventType.C_M_CREATE_TABLE.toString());
   }
 
@@ -100,7 +114,7 @@
     this.tableLock.acquire();
     boolean success = false;
     try {
-      String tableName = this.hTableDescriptor.getNameAsString();
+      String tableName = this.tableName;
       if (MetaReader.tableExists(catalogTracker, tableName)) {
         throw new TableExistsException(tableName);
       }
@@ -109,17 +123,31 @@
       // same time, given the async nature of the operation, the table
       // could be in a state where .META. table hasn't been updated yet in
       // the process() function.
-      // Use enabling state to tell if there is already a request for the same
+      // Use CREATINGTABLE state to tell if there is already a request for the same
       // table in progress. This will introduce a new zookeeper call. Given
       // createTable isn't a frequent operation, that should be ok.
       //TODO: now that we have table locks, re-evaluate above
-      try {
-        if (!this.assignmentManager.getZKTable().checkAndSetEnablingTable(tableName)) {
-          throw new TableExistsException(tableName);
-        }
-      } catch (KeeperException e) {
-        throw new IOException("Unable to ensure that the table will be" +
-          " enabling because of a ZooKeeper issue", e);
+      if (!this.bypass) {
+        try {
+          if (!this.assignmentManager.getZKTable().checkAndSetCreatingTable(tableName)) {
+            throw new TableExistsException(tableName);
+          }
+          // Create the status znode under the /hbase/table/<tableName> znode.
+          // If this creation fails we can simply throw IOException: should the
+          // master abort just after this point, the znode is cleaned up on
+          // master restart, and the client will recreate the table anyway,
+          // since isTableAvailable() will still be false until the creation
+          // has gone all the way through.
+
+          this.assignmentManager.getZKTable().createTableCreationStatusNode(tableName);
+
+        } catch (KeeperException e) {
+          throw new IOException("Unable to ensure that the table will be" +
+            " marked as being created because of a ZooKeeper issue", e);
+        }
+      } else {
+        // A backup master has come alive and has seen a partially created table
+        handlePartiallyCreatedTable(tableName);
       }
       success = true;
     } finally {
@@ -130,6 +158,109 @@
     return this;
   }
 
+  private void handlePartiallyCreatedTable(String tableName) {
+    ZKTable zkTable = this.assignmentManager.getZKTable();
+    try {
+      TableCreationStatus.State currentTableCreationState = zkTable
+          .getCurrentTableCreationState(tableName);
+      Path tempDir = null;
+      FileSystem fs = null;
+      if (currentTableCreationState == null) {
+        cleanUpFailedCreation();
+        return;
+      }
+      switch (currentTableCreationState) {
+      case CREATED_TD:
+        // Only the table descriptor was written; just clear the in-memory
+        // CREATINGTABLE state. It is up to the client to recreate the table
+        cleanUpFailedCreation();
+        break;
+      case CREATED_REGIONINFO:
+        // Re-read the region infos from the tmp directory and retry the table
+        // creation from there
+        tempDir = this.fileSystemManager.getTempDir();
+        fs = this.fileSystemManager.getFileSystem();
+        try {
+
+          Path tempTableDir = new Path(tempDir, tableName);
+          RegionInfoExtractor infoExtractor = new RegionInfoExtractor(tempTableDir, fs, tableName);
+          List<HRegionInfo> regionInfos = infoExtractor.collectRegionInfos();
+          List<HRegionInfo> currentHRegions = Arrays.asList(this.newRegions);
+          regionInfos.retainAll(currentHRegions);
+          addToMetaAndBulkAssign(tableName, zkTable, regionInfos);
+
+          // Set the status in the zookeeper as ASSIGN_USER_REGIONS
+          zkTable.setStatusAssignUserRegions(tableName);
+
+          setEnabledState(tableName);
+
+          // TODO check about the table locks here
+        } catch (IOException e) {
+          LOG.error("Could not recover the region infos from the temp dir for the table "
+              + tableName, e);
+          cleanUpFailedCreation();
+        }
+        break;
+      case MOVED_TO_ORIG_LOCATION:
+        Path rootDir = this.fileSystemManager.getRootDir();
+        fs = this.fileSystemManager.getFileSystem();
+        try {
+          // The temp directory was already renamed; re-read the region infos
+          // from the table directory and retry the table creation from there
+          Path tableDir = HTableDescriptor.getTableDir(rootDir, Bytes.toBytes(tableName));
+          RegionInfoExtractor infoExtractor = new RegionInfoExtractor(tableDir, fs, tableName);
+          List<HRegionInfo> regionInfos = infoExtractor.collectRegionInfos();
+          List<HRegionInfo> currentHRegions = Arrays.asList(this.newRegions);
+          regionInfos.retainAll(currentHRegions);
+          addToMetaAndBulkAssign(tableName, zkTable, regionInfos);
+
+          // Set the status in the zookeeper as ASSIGN_USER_REGIONS
+          //zkTable.setStatusAssignUserRegions(tableName);
+
+          setEnabledState(tableName);
+
+        } catch (IOException ioe) {
+          LOG.error("Could not recover the region infos from the table dir for the table "
+              + tableName, ioe);
+          cleanUpFailedCreation();
+        }
+        break;
+      case ADD_TO_META:
+        // The regions are already in META; only the assignment is left to do
+        try {
+          List<HRegionInfo> regionInfos = MetaReader.getTableRegions(this.catalogTracker,
+              Bytes.toBytes(tableName));
+
+          // Trigger immediate assignment of the regions in round-robin fashion
+          try {
+            assignmentManager.getRegionStates().createRegionStates(regionInfos);
+            assignmentManager.assign(regionInfos);
+          } catch (InterruptedException e) {
+            LOG.error("Caught " + e + " during round-robin assignment");
+            InterruptedIOException ie = new InterruptedIOException(e.getMessage());
+            ie.initCause(e);
+            throw ie;
+          }
+        } catch (IOException e) {
+          LOG.error("Error while collecting the regions for the table " + tableName
+              + " from META.", e);
+        }
+        zkTable.setStatusAssignUserRegions(tableName);
+        break;
+      case ASSIGN_USER_REGIONS:
+        // Assignment was already triggered; the AM takes care of the regions
+        // that are yet to be assigned. Just move the table on to ENABLED state
+        try {
+          setEnabledState(tableName);
+        } catch (IOException e) {
+          LOG.error("Could not move the table " + tableName + " to ENABLED state", e);
+        }
+        break;
+      default:
+        throw new IllegalArgumentException("Invalid state from table status node for the table "
+            + tableName);
+      }
+    } catch (KeeperException e) {
+      LOG.error("Unable to read the table creation status znode for the table " + tableName, e);
+    }
+  }
+
+  private void cleanUpFailedCreation() {
+    releaseTableLock();
+    this.assignmentManager.getZKTable().removeCreatingTable(
+        this.tableName);
+  }
+
@@ -137,12 +268,12 @@
   @Override
   public String toString() {
     String name = "UnknownServerName";
     if (server != null && server.getServerName() != null) {
       name = server.getServerName().toString();
     }
     return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" +
-      this.hTableDescriptor.getNameAsString();
+      this.tableName;
   }
 
   @Override
   public void process() {
-    String tableName = this.hTableDescriptor.getNameAsString();
+    String tableName = this.tableName;
     LOG.info("Attempting to create the table " + tableName);
 
     try {
@@ -166,6 +297,13 @@
    * @param exception null if process() is successful or not null if something has failed.
    */
   protected void completed(final Throwable exception) {
+    // Clean up the table creation status znode once the handler has finished
+    String tableName = this.tableName;
+    try {
+      this.assignmentManager.getZKTable().cleanUpTableStatusNode(tableName);
+    } catch (KeeperException e) {
+      LOG.error("Could not delete the table status znode for the table " + tableName, e);
+    }
     releaseTableLock();
     if (exception != null) {
       // Try deleting the enabling node in case of error
@@ -173,7 +311,7 @@
       // again with the same Active master
       // It will block the creation saying TableAlreadyExists.
-      this.assignmentManager.getZKTable().removeEnablingTable(
-        this.hTableDescriptor.getNameAsString());
+      this.assignmentManager.getZKTable().removeCreatingTable(
+        tableName);
     }
   }
 
@@ -197,23 +335,61 @@
 
     // 1. Create Table Descriptor
     FSTableDescriptors.createTableDescriptor(fs, tempdir, this.hTableDescriptor);
+
+    // 2. Set the status in the zookeeper as CREATED_TD
+    ZKTable zkTable = this.assignmentManager.getZKTable();
+    zkTable.setStatusCreatedTableDescriptor(tableName);
+
     Path tempTableDir = new Path(tempdir, tableName);
     Path tableDir = new Path(fileSystemManager.getRootDir(), tableName);
 
-    // 2. Create Regions
+    // 3. Create Regions
     List<HRegionInfo> regionInfos = handleCreateHdfsRegions(tempdir, tableName);
+
+    // 4. Set the status in the zookeeper as CREATED_REGIONINFO
+    zkTable.setStatusCreatedRegionInfo(tableName);
 
-    // 3. Move Table temp directory to the hbase root location
+    // 5. Move Table temp directory to the hbase root location
     if (!fs.rename(tempTableDir, tableDir)) {
       throw new IOException("Unable to move table from temp=" + tempTableDir +
         " to hbase root=" + tableDir);
     }
+
+    // 6. Set the status in the zookeeper as MOVED_TO_ORIG_LOCATION
+    zkTable.setStatusMovedToOriginalLocation(tableName);
+
+    // 7. Add to meta and assign the regions
+    addToMetaAndBulkAssign(tableName, zkTable, regionInfos);
+
+    // 8. Set the status in the zookeeper as ASSIGN_USER_REGIONS
+    // This state may not be needed
+    //zkTable.setStatusAssignUserRegions(tableName);
+
+    // 9. Set table enabled flag up in zk.
+    setEnabledState(tableName);
+  }
 
+  private void setEnabledState(String tableName) throws IOException {
+    try {
+      assignmentManager.getZKTable().setEnabledTable(tableName);
+    } catch (KeeperException e) {
+      throw new IOException("Unable to ensure that " + tableName + " will be" +
+        " enabled because of a ZooKeeper issue", e);
+    }
+  }
+
+  private void addToMetaAndBulkAssign(String tableName, ZKTable zkTable,
+      List<HRegionInfo> regionInfos) throws IOException, NoNodeException, KeeperException,
+      InterruptedIOException {
+    // Note that regionInfos cannot be empty; the HMaster creation of the HRegionInfo[] takes care of it.
    if (regionInfos != null && regionInfos.size() > 0) {
-      // 4. Add regions to META
+      // Add regions to META
      MetaEditor.addRegionsToMeta(this.catalogTracker, regionInfos);
+
+      // Set the status in the zookeeper as ADD_TO_META
+      zkTable.setStatusToAddedToMeta(tableName);
 
-      // 5. Trigger immediate assignment of the regions in round-robin fashion
+      // Trigger immediate assignment of the regions in round-robin fashion
       try {
         assignmentManager.getRegionStates().createRegionStates(regionInfos);
         assignmentManager.assign(regionInfos);
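The handler above checkpoints the creation into ZooKeeper in a fixed order, and handlePartiallyCreatedTable() resumes from whichever checkpoint was recorded last. A compact restatement of that ordering (a hypothetical helper, not in the patch; it leans on the numeric ordering the .proto assigns to the states):

    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCreationStatus.State;

    public class CreationCheckpointOrder {
      static final State[] ORDER = {
        State.CREATED_TD,             // table descriptor written under the temp dir
        State.CREATED_REGIONINFO,     // region dirs and .regioninfo files created
        State.MOVED_TO_ORIG_LOCATION, // temp dir renamed to the final table dir
        State.ADD_TO_META,            // regions inserted into .META.
        State.ASSIGN_USER_REGIONS,    // round-robin assignment triggered
      };

      // A step must be redone on failover iff it comes after the last
      // checkpoint the dying master managed to record.
      static boolean needsRedo(State lastRecorded, State step) {
        return step.getNumber() > lastRecorded.getNumber();
      }
    }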
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/RegionInfoExtractor.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/RegionInfoExtractor.java	(revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/RegionInfoExtractor.java	(working copy)
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.handler;
+
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.TreeSet;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Reads the HRegionInfos of a table back from the .regioninfo files of the
+ * region directories found under the given table directory.
+ */
+public class RegionInfoExtractor {
+
+  private final Path path;
+  private final FileSystem fs;
+  private final String tableName;
+
+  public RegionInfoExtractor(Path path, FileSystem fs, String tableName) {
+    this.path = path;
+    this.fs = fs;
+    this.tableName = tableName;
+  }
+
+  public List<HRegionInfo> collectRegionInfos() throws IOException {
+    TreeSet<HRegionInfo> hris = new TreeSet<HRegionInfo>(new RegionInfoComparatorBasedOnStartKey());
+    FileStatus[] listStatus = fs.listStatus(this.path);
+    for (FileStatus regionDir : listStatus) {
+      if (regionDir.isDir()) {
+        HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir.getPath());
+        hris.add(hri);
+      }
+    }
+
+    List<HRegionInfo> hriList = new LinkedList<HRegionInfo>();
+    hriList.addAll(hris);
+    return hriList;
+  }
+
+  private static class RegionInfoComparatorBasedOnStartKey implements Comparator<HRegionInfo> {
+    @Override
+    public int compare(HRegionInfo l, HRegionInfo r) {
+      return Bytes.compareTo(l.getStartKey(), r.getStartKey());
+    }
+  }
+}
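RegionInfoExtractor is driven from the CREATED_REGIONINFO and MOVED_TO_ORIG_LOCATION branches above: read the .regioninfo files that made it to disk, then intersect them with the regions the handler was originally asked to create. A sketch of that call pattern (the wrapper class, method, and variable names are illustrative):

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.master.handler.RegionInfoExtractor;

    public class RegionInfoRecovery {
      // Returns the intended regions whose directories survived on disk,
      // sorted by start key (the extractor's comparator guarantees the order).
      static List<HRegionInfo> survivingRegions(FileSystem fs, Path tableDir,
          String tableName, HRegionInfo[] intended) throws IOException {
        List<HRegionInfo> onDisk =
            new RegionInfoExtractor(tableDir, fs, tableName).collectRegionInfos();
        onDisk.retainAll(Arrays.asList(intended));
        return onDisk;
      }
    }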
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java	(revision 1459141)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java	(working copy)
@@ -1462,7 +1462,7 @@
 
     this.executorService.submit(new CreateTableHandler(this,
       this.fileSystemManager, hTableDescriptor, conf,
-      newRegions, this).prepare());
+      newRegions, this, false, hTableDescriptor.getNameAsString()).prepare());
     if (cpHost != null) {
       cpHost.postCreateTable(hTableDescriptor, newRegions);
     }
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java	(revision 1459141)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java	(working copy)
@@ -67,7 +67,8 @@
       final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
       throws NotAllMetaRegionsOnlineException, TableExistsException, IOException {
     super(masterServices, masterServices.getMasterFileSystem(), hTableDescriptor,
-      masterServices.getConfiguration(), null, masterServices);
+      masterServices.getConfiguration(), null, masterServices, false,
+      hTableDescriptor.getNameAsString());
 
     // Snapshot information
     this.snapshot = snapshot;
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestCreateTableHandler.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestCreateTableHandler.java	(revision 1459141)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestCreateTableHandler.java	(working copy)
@@ -1,110 +1,111 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ -package org.apache.hadoop.hbase.master.handler; - -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MediumTests; -import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.MasterFileSystem; -import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.TestMaster; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKTable; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(MediumTests.class) -public class TestCreateTableHandler { - private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final Log LOG = LogFactory.getLog(TestCreateTableHandler.class); - private static final byte[] TABLENAME = Bytes.toBytes("TestCreateTableHandler"); - private static final byte[] FAMILYNAME = Bytes.toBytes("fam"); - public static boolean throwException = false; - - @BeforeClass - public static void setUp() throws Exception { - TEST_UTIL.startMiniCluster(1); - } - - @AfterClass - public static void tearDown() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Test - public void testCreateTableHandlerIfCalledTwoTimesAndFirstOneIsUnderProgress() throws Exception { - final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - final HMaster m = cluster.getMaster(); - final HTableDescriptor desc = new HTableDescriptor(TABLENAME); - desc.addFamily(new HColumnDescriptor(FAMILYNAME)); - final HRegionInfo[] hRegionInfos = new HRegionInfo[] { new HRegionInfo(desc.getName(), null, - null) }; - CustomCreateTableHandler handler = new CustomCreateTableHandler(m, m.getMasterFileSystem(), - desc, cluster.getConfiguration(), hRegionInfos, m); - handler.prepare(); - throwException = true; - handler.process(); - throwException = false; - CustomCreateTableHandler handler1 = new CustomCreateTableHandler(m, m.getMasterFileSystem(), - desc, cluster.getConfiguration(), hRegionInfos, m); - handler1.prepare(); - handler1.process(); - for (int i = 0; i < 100; i++) { - if (!TEST_UTIL.getHBaseAdmin().isTableAvailable(TABLENAME)) { - Thread.sleep(200); - } - } - assertTrue(TEST_UTIL.getHBaseAdmin().isTableEnabled(TABLENAME)); - - } - - private static class CustomCreateTableHandler extends CreateTableHandler { - public CustomCreateTableHandler(Server server, MasterFileSystem fileSystemManager, - HTableDescriptor hTableDescriptor, Configuration conf, HRegionInfo[] newRegions, - MasterServices masterServices) { - super(server, fileSystemManager, hTableDescriptor, conf, newRegions, masterServices); - } - - @Override - protected List handleCreateHdfsRegions(Path tableRootDir, String tableName) - throws IOException { - if (throwException) { - throw new IOException("Test throws exceptions."); - } - return super.handleCreateHdfsRegions(tableRootDir, tableName); - } - } -} +/** + * + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.handler; + +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.TestMaster; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.ZKTable; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(MediumTests.class) +public class TestCreateTableHandler { + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final Log LOG = LogFactory.getLog(TestCreateTableHandler.class); + private static final byte[] TABLENAME = Bytes.toBytes("TestCreateTableHandler"); + private static final byte[] FAMILYNAME = Bytes.toBytes("fam"); + public static boolean throwException = false; + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL.startMiniCluster(1); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testCreateTableHandlerIfCalledTwoTimesAndFirstOneIsUnderProgress() throws Exception { + final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + final HMaster m = cluster.getMaster(); + final HTableDescriptor desc = new HTableDescriptor(TABLENAME); + desc.addFamily(new HColumnDescriptor(FAMILYNAME)); + final HRegionInfo[] hRegionInfos = new HRegionInfo[] { new HRegionInfo(desc.getName(), null, + null) }; + CustomCreateTableHandler handler = new CustomCreateTableHandler(m, m.getMasterFileSystem(), + desc, cluster.getConfiguration(), hRegionInfos, m); + handler.prepare(); + throwException = true; + handler.process(); + throwException = false; + CustomCreateTableHandler handler1 = new CustomCreateTableHandler(m, m.getMasterFileSystem(), + desc, cluster.getConfiguration(), hRegionInfos, m); + handler1.prepare(); + handler1.process(); + for (int i = 0; i < 100; i++) { + if (!TEST_UTIL.getHBaseAdmin().isTableAvailable(TABLENAME)) { + Thread.sleep(200); 
+ } + } + assertTrue(TEST_UTIL.getHBaseAdmin().isTableEnabled(TABLENAME)); + + } + + private static class CustomCreateTableHandler extends CreateTableHandler { + public CustomCreateTableHandler(Server server, MasterFileSystem fileSystemManager, + HTableDescriptor hTableDescriptor, Configuration conf, HRegionInfo[] newRegions, + MasterServices masterServices) { + super(server, fileSystemManager, hTableDescriptor, conf, newRegions, masterServices, false, + hTableDescriptor.getNameAsString()); + } + + @Override + protected List handleCreateHdfsRegions(Path tableRootDir, String tableName) + throws IOException { + if (throwException) { + throw new IOException("Test throws exceptions."); + } + return super.handleCreateHdfsRegions(tableRootDir, tableName); + } + } +}
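Taken together, the widened constructor now has two kinds of callers; condensed from the hunks above into a stand-alone sketch (the wrapper class and method names are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.master.HMaster;
    import org.apache.hadoop.hbase.master.handler.CreateTableHandler;

    public class CreateTableHandlerCallSites {
      // Normal create (HMaster.createTable): descriptor in hand, bypass = false.
      static void normalCreate(HMaster master, Configuration conf,
          HTableDescriptor htd, HRegionInfo[] newRegions) throws Exception {
        new CreateTableHandler(master, master.getMasterFileSystem(), htd, conf,
            newRegions, master, false, htd.getNameAsString()).prepare().process();
      }

      // Failover recovery (AssignmentManager.recoverTableInCreation): only the
      // table name survives, so the descriptor slot is null and bypass = true.
      static void resumeCreate(HMaster master, Configuration conf,
          String tableName, HRegionInfo[] newRegions) throws Exception {
        new CreateTableHandler(master, master.getMasterFileSystem(), null, conf,
            newRegions, master, true, tableName).prepare().process();
      }
    }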