diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index eba412d..630fca9 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.CreateAndFailSilent;
@@ -52,6 +54,7 @@ import org.apache.zookeeper.proto.DeleteRequest;
 import org.apache.zookeeper.proto.SetDataRequest;
 import org.apache.zookeeper.server.ZooKeeperSaslServer;
 
+import com.google.protobuf.ByteString;
 import com.google.protobuf.InvalidProtocolBufferException;
 
 import javax.security.auth.login.AppConfigurationEntry;
@@ -69,6 +72,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.TreeMap;
 
 /**
  * Internal HBase utility class for ZooKeeper.
@@ -1857,4 +1861,67 @@ public class ZKUtil {
       return 0;
     }
   }
+
+  /**
+   * @param regionLastFlushedSequenceId the flushed sequence id of a region which is the min of its
+   *          store max seq ids
+   * @param storeSequenceIds column family to sequence id map
+   * @return Serialized protobuf of RegionSequenceIds with pb magic prefix prepended, suitable for
+   *         use to filter wal edits in distributedLogReplay mode
+   */
+  public static byte[] regionSequenceIdsToByteArray(final Long regionLastFlushedSequenceId,
+      final Map<byte[], Long> storeSequenceIds) {
+    RegionSequenceIds.Builder regionSequenceIdsBuilder = RegionSequenceIds.newBuilder();
+    StoreSequenceId.Builder storeSequenceIdBuilder = StoreSequenceId.newBuilder();
+    if (storeSequenceIds != null) {
+      for (Map.Entry<byte[], Long> entry : storeSequenceIds.entrySet()) {
+        storeSequenceIdBuilder.setFamilyName(ByteString.copyFrom(entry.getKey()));
+        storeSequenceIdBuilder.setSequenceId(entry.getValue());
+        regionSequenceIdsBuilder.addStoreSequenceId(storeSequenceIdBuilder.build());
+        storeSequenceIdBuilder.clear();
+      }
+    }
+    regionSequenceIdsBuilder.setLastFlushedSequenceId(regionLastFlushedSequenceId);
+    byte[] result = regionSequenceIdsBuilder.build().toByteArray();
+    return ProtobufUtil.prependPBMagic(result);
+  }
+
+  /**
+   * @param bytes Content of serialized data of RegionSequenceIds
+   * @return the deserialized RegionSequenceIds
+   * @throws DeserializationException if the pb magic prefix is missing or the content cannot be
+   *           parsed
+   */
+  public static RegionSequenceIds parseRegionStoreSequenceIds(final byte[] bytes)
+      throws DeserializationException {
+    if (bytes == null || !ProtobufUtil.isPBMagicPrefix(bytes)) {
+      throw new DeserializationException("Unable to parse RegionStoreSequenceIds.");
+    }
+    RegionSequenceIds.Builder regionSequenceIdsBuilder = RegionSequenceIds.newBuilder();
+    int pblen = ProtobufUtil.lengthOfPBMagic();
+    RegionSequenceIds storeIds = null;
+    try {
+      storeIds = regionSequenceIdsBuilder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+    } catch (InvalidProtocolBufferException e) {
+      throw new DeserializationException(e);
+    }
+    return storeIds;
+  }
+
+  /**
+   * @param bytes Content of serialized data of RegionSequenceIds
+   * @return last flushed sequence id of a region which is the min of its store max seq ids
+   * @throws DeserializationException if the content cannot be parsed
+   */
+  public static long parseRegionLastFlushedSequenceId(final byte[] bytes)
+      throws DeserializationException {
+    RegionSequenceIds regionSequenceIds = parseRegionStoreSequenceIds(bytes);
+    if (regionSequenceIds != null) {
+      return regionSequenceIds.getLastFlushedSequenceId();
+    }
+    return -1;
+  }
 }
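For illustration, a minimal sketch (class name and values hypothetical, not part of the patch) of how the new ZKUtil helpers round-trip region and store sequence ids through the pb-magic-prefixed format:

import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;

public class RegionSequenceIdsRoundTrip {
  public static void main(String[] args) throws Exception {
    // Per-store max flushed sequence ids, keyed by column family name.
    Map<byte[], Long> storeSeqIds = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
    storeSeqIds.put(Bytes.toBytes("cf1"), 120L);
    storeSeqIds.put(Bytes.toBytes("cf2"), 85L);

    // The region-level id is the minimum of the store max sequence ids (85 here).
    byte[] zkData = ZKUtil.regionSequenceIdsToByteArray(85L, storeSeqIds);

    // parseRegionStoreSequenceIds validates and strips the pb magic prefix before decoding.
    RegionSequenceIds ids = ZKUtil.parseRegionStoreSequenceIds(zkData);
    System.out.println(ids.getLastFlushedSequenceId() + " / "
        + ids.getStoreSequenceIdCount() + " stores");
  }
}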
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index 1395704..763647c 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -5741,6 +5741,1132 @@ public final class ZooKeeperProtos {
     // @@protoc_insertion_point(class_scope:TableLock)
   }
 
+  public interface StoreSequenceIdOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required bytes familyName = 1;
+    boolean hasFamilyName();
+    com.google.protobuf.ByteString getFamilyName();
+
+    // required uint64 sequenceId = 2;
+    boolean hasSequenceId();
+    long getSequenceId();
+  }
+  public static final class StoreSequenceId extends
+      com.google.protobuf.GeneratedMessage
+      implements StoreSequenceIdOrBuilder {
+    // Use StoreSequenceId.newBuilder() to construct.
+    private StoreSequenceId(Builder builder) {
+      super(builder);
+    }
+    private StoreSequenceId(boolean noInit) {}
+
+    private static final StoreSequenceId defaultInstance;
+    public static StoreSequenceId getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public StoreSequenceId getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_fieldAccessorTable;
+    }
+
+    private int bitField0_;
+    // required bytes familyName = 1;
+    public static final int FAMILYNAME_FIELD_NUMBER = 1;
+    private com.google.protobuf.ByteString familyName_;
+    public boolean hasFamilyName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public com.google.protobuf.ByteString getFamilyName() {
+      return familyName_;
+    }
+
+    // required uint64 sequenceId = 2;
+    public static final int SEQUENCEID_FIELD_NUMBER = 2;
+    private long sequenceId_;
+    public boolean hasSequenceId() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public long getSequenceId() {
+      return sequenceId_;
+    }
+
+    private void initFields() {
+      familyName_ = com.google.protobuf.ByteString.EMPTY;
+      sequenceId_ = 0L;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasFamilyName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasSequenceId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void
writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, familyName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, sequenceId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, familyName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, sequenceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId) obj; + + boolean result = true; + result = result && (hasFamilyName() == other.hasFamilyName()); + if (hasFamilyName()) { + result = result && getFamilyName() + .equals(other.getFamilyName()); + } + result = result && (hasSequenceId() == other.hasSequenceId()); + if (hasSequenceId()) { + result = result && (getSequenceId() + == other.getSequenceId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasFamilyName()) { + hash = (37 * hash) + FAMILYNAME_FIELD_NUMBER; + hash = (53 * hash) + getFamilyName().hashCode(); + } + if (hasSequenceId()) { + hash = (37 * hash) + SEQUENCEID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getSequenceId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); 
+ } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + familyName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = 
(bitField0_ & ~0x00000001); + sequenceId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.familyName_ = familyName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.sequenceId_ = sequenceId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance()) return this; + if (other.hasFamilyName()) { + setFamilyName(other.getFamilyName()); + } + if (other.hasSequenceId()) { + setSequenceId(other.getSequenceId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasFamilyName()) { + + return false; + } + if (!hasSequenceId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; 
+ } + break; + } + case 10: { + bitField0_ |= 0x00000001; + familyName_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + sequenceId_ = input.readUInt64(); + break; + } + } + } + } + + private int bitField0_; + + // required bytes familyName = 1; + private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasFamilyName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public com.google.protobuf.ByteString getFamilyName() { + return familyName_; + } + public Builder setFamilyName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + familyName_ = value; + onChanged(); + return this; + } + public Builder clearFamilyName() { + bitField0_ = (bitField0_ & ~0x00000001); + familyName_ = getDefaultInstance().getFamilyName(); + onChanged(); + return this; + } + + // required uint64 sequenceId = 2; + private long sequenceId_ ; + public boolean hasSequenceId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getSequenceId() { + return sequenceId_; + } + public Builder setSequenceId(long value) { + bitField0_ |= 0x00000002; + sequenceId_ = value; + onChanged(); + return this; + } + public Builder clearSequenceId() { + bitField0_ = (bitField0_ & ~0x00000002); + sequenceId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:StoreSequenceId) + } + + static { + defaultInstance = new StoreSequenceId(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:StoreSequenceId) + } + + public interface RegionSequenceIdsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 lastFlushedSequenceId = 1; + boolean hasLastFlushedSequenceId(); + long getLastFlushedSequenceId(); + + // repeated .StoreSequenceId storeSequenceId = 2; + java.util.List + getStoreSequenceIdList(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index); + int getStoreSequenceIdCount(); + java.util.List + getStoreSequenceIdOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( + int index); + } + public static final class RegionSequenceIds extends + com.google.protobuf.GeneratedMessage + implements RegionSequenceIdsOrBuilder { + // Use RegionSequenceIds.newBuilder() to construct. 
+ private RegionSequenceIds(Builder builder) { + super(builder); + } + private RegionSequenceIds(boolean noInit) {} + + private static final RegionSequenceIds defaultInstance; + public static RegionSequenceIds getDefaultInstance() { + return defaultInstance; + } + + public RegionSequenceIds getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionSequenceIds_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionSequenceIds_fieldAccessorTable; + } + + private int bitField0_; + // required uint64 lastFlushedSequenceId = 1; + public static final int LASTFLUSHEDSEQUENCEID_FIELD_NUMBER = 1; + private long lastFlushedSequenceId_; + public boolean hasLastFlushedSequenceId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getLastFlushedSequenceId() { + return lastFlushedSequenceId_; + } + + // repeated .StoreSequenceId storeSequenceId = 2; + public static final int STORESEQUENCEID_FIELD_NUMBER = 2; + private java.util.List storeSequenceId_; + public java.util.List getStoreSequenceIdList() { + return storeSequenceId_; + } + public java.util.List + getStoreSequenceIdOrBuilderList() { + return storeSequenceId_; + } + public int getStoreSequenceIdCount() { + return storeSequenceId_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index) { + return storeSequenceId_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( + int index) { + return storeSequenceId_.get(index); + } + + private void initFields() { + lastFlushedSequenceId_ = 0L; + storeSequenceId_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasLastFlushedSequenceId()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getStoreSequenceIdCount(); i++) { + if (!getStoreSequenceId(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, lastFlushedSequenceId_); + } + for (int i = 0; i < storeSequenceId_.size(); i++) { + output.writeMessage(2, storeSequenceId_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, lastFlushedSequenceId_); + } + for (int i = 0; i < storeSequenceId_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, storeSequenceId_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected 
java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds) obj; + + boolean result = true; + result = result && (hasLastFlushedSequenceId() == other.hasLastFlushedSequenceId()); + if (hasLastFlushedSequenceId()) { + result = result && (getLastFlushedSequenceId() + == other.getLastFlushedSequenceId()); + } + result = result && getStoreSequenceIdList() + .equals(other.getStoreSequenceIdList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasLastFlushedSequenceId()) { + hash = (37 * hash) + LASTFLUSHEDSEQUENCEID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLastFlushedSequenceId()); + } + if (getStoreSequenceIdCount() > 0) { + hash = (37 * hash) + STORESEQUENCEID_FIELD_NUMBER; + hash = (53 * hash) + getStoreSequenceIdList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIdsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionSequenceIds_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionSequenceIds_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getStoreSequenceIdFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + lastFlushedSequenceId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + if (storeSequenceIdBuilder_ == null) { + storeSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + storeSequenceIdBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds build() { + 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.lastFlushedSequenceId_ = lastFlushedSequenceId_; + if (storeSequenceIdBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.storeSequenceId_ = storeSequenceId_; + } else { + result.storeSequenceId_ = storeSequenceIdBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds.getDefaultInstance()) return this; + if (other.hasLastFlushedSequenceId()) { + setLastFlushedSequenceId(other.getLastFlushedSequenceId()); + } + if (storeSequenceIdBuilder_ == null) { + if (!other.storeSequenceId_.isEmpty()) { + if (storeSequenceId_.isEmpty()) { + storeSequenceId_ = other.storeSequenceId_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.addAll(other.storeSequenceId_); + } + onChanged(); + } + } else { + if (!other.storeSequenceId_.isEmpty()) { + if (storeSequenceIdBuilder_.isEmpty()) { + storeSequenceIdBuilder_.dispose(); + storeSequenceIdBuilder_ = null; + storeSequenceId_ = other.storeSequenceId_; + bitField0_ = (bitField0_ & ~0x00000002); + storeSequenceIdBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getStoreSequenceIdFieldBuilder() : null; + } else { + storeSequenceIdBuilder_.addAllMessages(other.storeSequenceId_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasLastFlushedSequenceId()) { + + return false; + } + for (int i = 0; i < getStoreSequenceIdCount(); i++) { + if (!getStoreSequenceId(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + lastFlushedSequenceId_ = input.readUInt64(); + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addStoreSequenceId(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required uint64 lastFlushedSequenceId = 1; + private long lastFlushedSequenceId_ ; + public boolean hasLastFlushedSequenceId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getLastFlushedSequenceId() { + return lastFlushedSequenceId_; + } + public Builder setLastFlushedSequenceId(long value) { + bitField0_ |= 0x00000001; + lastFlushedSequenceId_ = value; + onChanged(); + return this; + } + public Builder clearLastFlushedSequenceId() { + bitField0_ = (bitField0_ & ~0x00000001); + lastFlushedSequenceId_ = 0L; + onChanged(); + return this; + } + + // repeated .StoreSequenceId storeSequenceId = 2; + private java.util.List storeSequenceId_ = + java.util.Collections.emptyList(); + private void ensureStoreSequenceIdIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + storeSequenceId_ = new java.util.ArrayList(storeSequenceId_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder> storeSequenceIdBuilder_; + + public java.util.List getStoreSequenceIdList() { + if (storeSequenceIdBuilder_ == null) { + return java.util.Collections.unmodifiableList(storeSequenceId_); + } else { + return storeSequenceIdBuilder_.getMessageList(); + } + } + public int getStoreSequenceIdCount() { + if (storeSequenceIdBuilder_ == null) { + return storeSequenceId_.size(); + } else { + return storeSequenceIdBuilder_.getCount(); + } + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index) { + if (storeSequenceIdBuilder_ == null) { + return storeSequenceId_.get(index); + } else { + return storeSequenceIdBuilder_.getMessage(index); + } + } + public Builder setStoreSequenceId( + int index, 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId value) { + if (storeSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.set(index, value); + onChanged(); + } else { + storeSequenceIdBuilder_.setMessage(index, value); + } + return this; + } + public Builder setStoreSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder builderForValue) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.set(index, builderForValue.build()); + onChanged(); + } else { + storeSequenceIdBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addStoreSequenceId(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId value) { + if (storeSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.add(value); + onChanged(); + } else { + storeSequenceIdBuilder_.addMessage(value); + } + return this; + } + public Builder addStoreSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId value) { + if (storeSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.add(index, value); + onChanged(); + } else { + storeSequenceIdBuilder_.addMessage(index, value); + } + return this; + } + public Builder addStoreSequenceId( + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder builderForValue) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.add(builderForValue.build()); + onChanged(); + } else { + storeSequenceIdBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addStoreSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder builderForValue) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.add(index, builderForValue.build()); + onChanged(); + } else { + storeSequenceIdBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllStoreSequenceId( + java.lang.Iterable values) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + super.addAll(values, storeSequenceId_); + onChanged(); + } else { + storeSequenceIdBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearStoreSequenceId() { + if (storeSequenceIdBuilder_ == null) { + storeSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + storeSequenceIdBuilder_.clear(); + } + return this; + } + public Builder removeStoreSequenceId(int index) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.remove(index); + onChanged(); + } else { + storeSequenceIdBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder getStoreSequenceIdBuilder( + int index) { + return getStoreSequenceIdFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( + int index) { + if (storeSequenceIdBuilder_ == null) { + return 
storeSequenceId_.get(index); } else { + return storeSequenceIdBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getStoreSequenceIdOrBuilderList() { + if (storeSequenceIdBuilder_ != null) { + return storeSequenceIdBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(storeSequenceId_); + } + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder() { + return getStoreSequenceIdFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance()); + } + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder( + int index) { + return getStoreSequenceIdFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance()); + } + public java.util.List + getStoreSequenceIdBuilderList() { + return getStoreSequenceIdFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder> + getStoreSequenceIdFieldBuilder() { + if (storeSequenceIdBuilder_ == null) { + storeSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder>( + storeSequenceId_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + storeSequenceId_ = null; + } + return storeSequenceIdBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RegionSequenceIds) + } + + static { + defaultInstance = new RegionSequenceIds(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegionSequenceIds) + } + private static com.google.protobuf.Descriptors.Descriptor internal_static_RootRegionServer_descriptor; private static @@ -5796,6 +6922,16 @@ public final class ZooKeeperProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_TableLock_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_StoreSequenceId_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_StoreSequenceId_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegionSequenceIds_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegionSequenceIds_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -5827,9 +6963,13 @@ public final class ZooKeeperProtos { "ner\030\001 \002(\t\"\207\001\n\tTableLock\022\021\n\ttableName\030\001 \001", "(\014\022\036\n\tlockOwner\030\002 \001(\0132\013.ServerName\022\020\n\010th" + "readId\030\003 \001(\003\022\020\n\010isShared\030\004 \001(\010\022\017\n\007purpos" + - "e\030\005 \001(\t\022\022\n\ncreateTime\030\006 \001(\003BE\n*org.apach" + - "e.hadoop.hbase.protobuf.generatedB\017ZooKe" + - "eperProtosH\001\210\001\001\240\001\001" + "e\030\005 \001(\t\022\022\n\ncreateTime\030\006 
\001(\003\"9\n\017StoreSequ" + + "enceId\022\022\n\nfamilyName\030\001 \002(\014\022\022\n\nsequenceId" + + "\030\002 \002(\004\"]\n\021RegionSequenceIds\022\035\n\025lastFlush" + + "edSequenceId\030\001 \002(\004\022)\n\017storeSequenceId\030\002 " + + "\003(\0132\020.StoreSequenceIdBE\n*org.apache.hado" + + "op.hbase.protobuf.generatedB\017ZooKeeperPr" + + "otosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -5924,6 +7064,22 @@ public final class ZooKeeperProtos { new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", "CreateTime", }, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder.class); + internal_static_StoreSequenceId_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_StoreSequenceId_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_StoreSequenceId_descriptor, + new java.lang.String[] { "FamilyName", "SequenceId", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.class, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder.class); + internal_static_RegionSequenceIds_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_RegionSequenceIds_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegionSequenceIds_descriptor, + new java.lang.String[] { "LastFlushedSequenceId", "StoreSequenceId", }, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds.class, + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds.Builder.class); return null; } }; diff --git hbase-protocol/src/main/protobuf/ZooKeeper.proto hbase-protocol/src/main/protobuf/ZooKeeper.proto index 0e099d1..e509830 100644 --- hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -145,3 +145,20 @@ message TableLock { optional string purpose = 5; optional int64 createTime = 6; } + +/** + * sequence Id of a store + */ +message StoreSequenceId { + required bytes familyName = 1; + required uint64 sequenceId = 2; +} + +/** + * contains a sequence id of a region which should be the minimum of its store sequence ids and + * list sequence ids of the region + */ +message RegionSequenceIds { + required uint64 lastFlushedSequenceId = 1; + repeated StoreSequenceId storeSequenceId = 2; +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index 621e7ae..fe7062f 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.master.SplitLogManager.TaskFinisher.Status; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds; import org.apache.hadoop.hbase.regionserver.SplitLogWorker; import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter; import 
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 621e7ae..fe7062f 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.SplitLogManager.TaskFinisher.Status;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds;
 import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
 import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
 import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
@@ -1102,8 +1103,7 @@ public class SplitLogManager extends ZooKeeperListener {
           lastRecordedFlushedSequenceId = SplitLogManager.parseLastFlushedSequenceIdFrom(data);
           if (lastRecordedFlushedSequenceId < lastSequenceId) {
             // update last flushed sequence id in the region level
-            ZKUtil.setData(this.watcher, nodePath,
-              ZKUtil.positionToByteArray(lastSequenceId));
+            ZKUtil.setData(this.watcher, nodePath, ZKUtil.positionToByteArray(lastSequenceId));
           }
         }
         // go one level deeper with server name
@@ -1113,7 +1113,7 @@ public class SplitLogManager extends ZooKeeperListener {
           lastSequenceId = lastRecordedFlushedSequenceId;
         }
         ZKUtil.createSetData(this.watcher, nodePath,
-          ZKUtil.positionToByteArray(lastSequenceId));
+          ZKUtil.regionSequenceIdsToByteArray(lastSequenceId, null));
         LOG.debug("Mark region " + regionEncodeName + " recovering from failed region server "
             + serverName);
@@ -1158,10 +1158,11 @@ public class SplitLogManager extends ZooKeeperListener {
    * @param zkw
    * @param serverName
    * @param encodedRegionName
-   * @return the last flushed sequence id recorded in ZK of the region for serverName
+   * @return the last flushed sequence ids recorded in ZK of the region for serverName
    * @throws IOException
    */
-  public static long getLastFlushedSequenceId(ZooKeeperWatcher zkw, String serverName,
+  public static RegionSequenceIds getRegionFlushedSequenceId(ZooKeeperWatcher zkw,
+      String serverName,
       String encodedRegionName) throws IOException {
     // when SplitLogWorker recovers a region by directly replaying unflushed WAL edits,
     // last flushed sequence Id changes when newly assigned RS flushes writes to the region.
@@ -1170,19 +1171,21 @@ public class SplitLogManager extends ZooKeeperListener {
     // when different newly assigned RS flushes the region.
     // Therefore, in this mode we need to fetch last sequence Ids from ZK where we keep history of
     // last flushed sequence Id for each failed RS instance.
-    long lastFlushedSequenceId = -1;
+    RegionSequenceIds result = null;
     String nodePath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, encodedRegionName);
     nodePath = ZKUtil.joinZNode(nodePath, serverName);
     try {
       byte[] data = ZKUtil.getData(zkw, nodePath);
       if (data != null) {
-        lastFlushedSequenceId = SplitLogManager.parseLastFlushedSequenceIdFrom(data);
+        result = ZKUtil.parseRegionStoreSequenceIds(data);
       }
     } catch (KeeperException e) {
       throw new IOException("Cannot get lastFlushedSequenceId from ZooKeeper for server="
           + serverName + "; region=" + encodedRegionName, e);
+    } catch (DeserializationException e) {
+      LOG.warn("Can't parse last flushed sequence Id from znode: " + nodePath, e);
     }
-    return lastFlushedSequenceId;
+    return result;
   }
 
   /**
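A usage sketch (helper name hypothetical, assuming the imports SplitLogManager already uses) of how a caller can flatten the message returned by getRegionFlushedSequenceId into a per-family map; HLogSplitter does essentially this further down in the patch:

// Fetch the per-family flushed sequence ids a failed server recorded for one
// recovering region; returns null when nothing was written to the znode.
public static Map<byte[], Long> readStoreSequenceIds(ZooKeeperWatcher zkw,
    String failedServerName, String encodedRegionName) throws IOException {
  RegionSequenceIds ids =
      SplitLogManager.getRegionFlushedSequenceId(zkw, failedServerName, encodedRegionName);
  if (ids == null) {
    return null;
  }
  Map<byte[], Long> storeIds = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
  for (StoreSequenceId id : ids.getStoreSequenceIdList()) {
    storeIds.put(id.getFamilyName().toByteArray(), id.getSequenceId());
  }
  return storeIds;
}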
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 176da9d..1b3c726 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -291,10 +291,13 @@ public class HRegion implements HeapSize { // , Writable{
   private final AtomicInteger majorInProgress = new AtomicInteger(0);
   private final AtomicInteger minorInProgress = new AtomicInteger(0);
 
-  /**
-   * Min sequence id stored in store files of a region when opening the region
-   */
-  private long minSeqIdForLogReplay = -1;
+  //
+  // Context: During replay we want to ensure that we do not lose any data. So, we
+  // have to be conservative in how we replay logs. For each store, we calculate
+  // the maxSeqId up to which the store was flushed, and skip the edits whose
+  // sequence ids are equal to or lower than the maxSeqId for each store.
+  // The following map is populated when opening the region.
+  Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
 
   /**
    * @return The smallest mvcc readPoint across all the scanners in this
@@ -620,12 +623,7 @@ public class HRegion implements HeapSize { // , Writable{
   private long initializeRegionStores(final CancelableProgressable reporter, MonitoredTask status)
       throws IOException, UnsupportedEncodingException {
     // Load in all the HStores.
-    //
-    // Context: During replay we want to ensure that we do not lose any data. So, we
-    // have to be conservative in how we replay logs. For each store, we calculate
-    // the maxSeqId up to which the store was flushed. And, skip the edits which
-    // is equal to or lower than maxSeqId for each store.
-    Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+
     long maxSeqId = -1;
     // initialized to -1 so that we pick up MemstoreTS from column families
     long maxMemstoreTS = -1;
@@ -657,9 +655,6 @@ public class HRegion implements HeapSize { // , Writable{
       long storeSeqIdForReplay = store.getMaxSequenceId(false);
       maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), storeSeqIdForReplay);
-      if (this.minSeqIdForLogReplay == -1 || storeSeqIdForReplay < this.minSeqIdForLogReplay) {
-        this.minSeqIdForLogReplay = storeSeqIdForReplay;
-      }
       // Include bulk loaded files when determining seqIdForAssignment
       long storeSeqIdForAssignment = store.getMaxSequenceId(true);
       if (maxSeqId == -1 || storeSeqIdForAssignment > maxSeqId) {
@@ -5561,11 +5556,11 @@ public class HRegion implements HeapSize { // , Writable{
   }
 
   /**
-   * Gets the min sequence number that was read from storage when this region was opened. WAL Edits
-   * with smaller sequence number will be skipped from replay.
+   * Gets the max sequence ids of the stores that were read from storage when this region was
+   * opened. WAL edits with a smaller or equal sequence number will be skipped during replay.
   */
-  public long getMinSeqIdForLogReplay() {
-    return this.minSeqIdForLogReplay;
+  public Map<byte[], Long> getMaxStoreSeqIdForLogReplay() {
+    return this.maxSeqIdInStores;
   }
 
   /**
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 5119dc0..08881dd 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -4241,7 +4241,14 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
     HRegionInfo region = r.getRegionInfo();
     ZooKeeperWatcher zkw = getZooKeeper();
     String previousRSName = this.getLastFailedRSFromZK(region.getEncodedName());
-    long minSeqIdForLogReplay = r.getMinSeqIdForLogReplay();
+    Map<byte[], Long> maxSeqIdInStores = r.getMaxStoreSeqIdForLogReplay();
+    long minSeqIdForLogReplay = -1;
+    for (byte[] columnFamily : maxSeqIdInStores.keySet()) {
+      Long storeSeqIdForReplay = maxSeqIdInStores.get(columnFamily);
+      if (minSeqIdForLogReplay == -1 || storeSeqIdForReplay < minSeqIdForLogReplay) {
+        minSeqIdForLogReplay = storeSeqIdForReplay;
+      }
+    }
     long lastRecordedFlushedSequenceId = -1;
     String nodePath = ZKUtil.joinZNode(this.zooKeeper.recoveringRegionsZNode,
       region.getEncodedName());
@@ -4254,10 +4261,11 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
       ZKUtil.setData(zkw, nodePath, ZKUtil.positionToByteArray(minSeqIdForLogReplay));
     }
     if (previousRSName != null) {
-      // one level deeper for failed RS
+      // one level deeper for the failed RS
       nodePath = ZKUtil.joinZNode(nodePath, previousRSName);
-      ZKUtil.setData(zkw, nodePath, ZKUtil.positionToByteArray(minSeqIdForLogReplay));
-      LOG.debug("Update last flushed sequence id of region " + region.getEncodedName() + " for "
+      ZKUtil.setData(zkw, nodePath,
+        ZKUtil.regionSequenceIdsToByteArray(minSeqIdForLogReplay, maxSeqIdInStores));
+      LOG.debug("Update last flushed sequence id of region " + region.getEncodedName() + " for "
           + previousRSName);
     } else {
       LOG.warn("Can't find failed region server for recovering region " + region.getEncodedName());
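A note on the minSeqIdForLogReplay loop just added to postOpenDeployTasks: since the family keys are unused, an equivalent and slightly tighter form (sketch, not part of the patch) iterates the map values directly:

long minSeqIdForLogReplay = -1;
for (Long storeSeqId : maxSeqIdInStores.values()) {
  if (minSeqIdForLogReplay == -1 || storeSeqId < minSeqIdForLogReplay) {
    minSeqIdForLogReplay = storeSeqId;
  }
}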
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
index cc78143..d264d67 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
@@ -28,6 +28,7 @@ import java.net.ConnectException;
 import java.text.ParseException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -81,6 +82,8 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.LastSequenceId;
@@ -154,7 +157,14 @@ public class HLogSplitter {
   protected boolean distributedLogReplay;
 
   // Map encodedRegionName -> lastFlushedSequenceId
-  Map<String, Long> lastFlushedSequenceIds = new ConcurrentHashMap<String, Long>();
+  protected Map<String, Long> lastFlushedSequenceIds = new ConcurrentHashMap<String, Long>();
+
+  // Map encodedRegionName -> maxSeqIdInStores
+  protected Map<String, Map<byte[], Long>> regionMaxSeqIdInStores =
+      new ConcurrentHashMap<String, Map<byte[], Long>>();
+
+  // The failed region server that the WAL file being split belongs to
+  protected String failedServerName = "";
 
   // Number of writer threads
   private final int numWriterThreads;
@@ -528,19 +538,23 @@ public class HLogSplitter {
     Entry entry;
     Long lastFlushedSequenceId = -1L;
     ServerName serverName = HLogUtil.getServerNameFromHLogDirectoryName(logPath);
-    String serverNameStr = (serverName == null) ? "" : serverName.getServerName();
+    failedServerName = (serverName == null) ? "" : serverName.getServerName();
     while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) {
       byte[] region = entry.getKey().getEncodedRegionName();
       String key = Bytes.toString(region);
       lastFlushedSequenceId = lastFlushedSequenceIds.get(key);
       if (lastFlushedSequenceId == null) {
         if (this.distributedLogReplay) {
-          lastFlushedSequenceId = SplitLogManager.getLastFlushedSequenceId(this.watcher,
-            serverNameStr, key);
+          lastFlushedSequenceId = -1L;
+          RegionSequenceIds ids =
+              SplitLogManager.getRegionFlushedSequenceId(this.watcher, failedServerName, key);
+          if (ids != null) {
+            lastFlushedSequenceId = ids.getLastFlushedSequenceId();
+          }
         } else if (sequenceIdChecker != null) {
           lastFlushedSequenceId = sequenceIdChecker.getLastSequenceId(region);
         }
-        if (lastFlushedSequenceId != null) {
+        if (lastFlushedSequenceId != null && lastFlushedSequenceId >= 0) {
           lastFlushedSequenceIds.put(key, lastFlushedSequenceId);
         } else {
           lastFlushedSequenceId = -1L;
@@ -1712,6 +1726,8 @@ public class HLogSplitter {
           this.skippedEdits.incrementAndGet();
           continue;
         }
+
+        Map<byte[], Long> maxStoreSequenceIds = null;
         boolean needSkip = false;
         Put put = null;
         Delete del = null;
@@ -1761,16 +1777,32 @@ public class HLogSplitter {
            needSkip = true;
            break;
          }
-          cachedLastFlushedSequenceId = lastFlushedSequenceIds.get(loc.getRegionInfo()
-            .getEncodedName());
+
+          cachedLastFlushedSequenceId =
+              lastFlushedSequenceIds.get(loc.getRegionInfo().getEncodedName());
           if (cachedLastFlushedSequenceId != null
               && cachedLastFlushedSequenceId >= entry.getKey().getLogSeqNum()) {
             // skip the whole HLog entry
             this.skippedEdits.incrementAndGet();
             needSkip = true;
             break;
+          } else {
+            if (maxStoreSequenceIds == null) {
+              maxStoreSequenceIds =
+                  regionMaxSeqIdInStores.get(loc.getRegionInfo().getEncodedName());
+            }
+            if (maxStoreSequenceIds != null) {
+              Long maxStoreSeqId = maxStoreSequenceIds.get(kv.getFamily());
+              if (maxStoreSeqId == null || maxStoreSeqId >= entry.getKey().getLogSeqNum()) {
+                // skip current kv if column family doesn't exist anymore or was already flushed
+                this.skippedEdits.incrementAndGet();
+                continue;
+              }
+            } else {
+              LOG.warn("Can't find store max sequence ids map");
+            }
           }
-
+
           if (kv.isDelete()) {
             del = new Delete(kv.getRow());
             del.setClusterId(entry.getKey().getClusterId());
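The per-KV filter in the hunk above can be distilled into a predicate (hypothetical helper, not in the patch): an edit is skippable when its family has no recorded max (the family no longer exists) or when the store's recorded max already covers its log sequence number.

// True when the edit can be skipped during replay.
static boolean canSkipEdit(Map<byte[], Long> maxStoreSequenceIds, byte[] family, long logSeqNum) {
  Long maxStoreSeqId = maxStoreSequenceIds.get(family);
  return maxStoreSeqId == null || maxStoreSeqId >= logSeqNum;
}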
@@ -1834,8 +1866,19 @@ public class HLogSplitter {
             onlineRegions.add(loc.getRegionInfo().getEncodedName());
             // retrieve last flushed sequence Id from ZK. Because region postOpenDeployTasks will
             // update the value for the region
-            lastFlushedSequenceId = SplitLogManager.getLastFlushedSequenceId(watcher, loc
-              .getServerName().getServerName(), loc.getRegionInfo().getEncodedName());
+            RegionSequenceIds ids =
+                SplitLogManager.getRegionFlushedSequenceId(watcher, failedServerName, loc.getRegionInfo()
+                    .getEncodedName());
+            if (ids != null) {
+              lastFlushedSequenceId = ids.getLastFlushedSequenceId();
+              Map<byte[], Long> storeIds = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+              List<StoreSequenceId> maxSeqIdInStores = ids.getStoreSequenceIdList();
+              for (StoreSequenceId id : maxSeqIdInStores) {
+                storeIds.put(id.getFamilyName().toByteArray(), id.getSequenceId());
+              }
+              regionMaxSeqIdInStores.put(loc.getRegionInfo().getEncodedName(), storeIds);
+            }
+
             if (cachedLastFlushedSequenceId == null
                 || lastFlushedSequenceId > cachedLastFlushedSequenceId) {
               lastFlushedSequenceIds.put(loc.getRegionInfo().getEncodedName(), lastFlushedSequenceId);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index 1945123..05130e9 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -823,7 +823,7 @@ public class TestDistributedLogSplitting {
   @Test (timeout=300000)
   public void testWorkerAbort() throws Exception {
     LOG.info("testWorkerAbort");
-    startCluster(2);
+    startCluster(3);
     final int NUM_LOG_LINES = 10000;
     final SplitLogManager slm = master.getMasterFileSystem().splitLogManager;
     FileSystem fs = master.getMasterFileSystem().getFileSystem();
@@ -845,6 +845,7 @@ public class TestDistributedLogSplitting {
         waitForCounter(tot_wkr_task_acquired, 0, 1, 1000);
         for (RegionServerThread rst : rsts) {
           rst.getRegionServer().abort("testing");
+          break; // abort only the first region server
         }
       }
     }.start();
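Since SplitLogManager serializes with a null store map when marking a region recovering, a unit test one might add (sketch, not part of the patch) to pin down that corner of the new ZKUtil API:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionSequenceIds;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.junit.Test;

public class TestRegionSequenceIdsSerialization {
  @Test
  public void testNullStoreMapRoundTrip() throws Exception {
    byte[] data = ZKUtil.regionSequenceIdsToByteArray(42L, null);
    RegionSequenceIds ids = ZKUtil.parseRegionStoreSequenceIds(data);
    assertEquals(42L, ids.getLastFlushedSequenceId());
    assertEquals(0, ids.getStoreSequenceIdCount());
  }
}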