From e2baf80e7f0d237b66c0e9f64b3cd1b85a4705e3 Mon Sep 17 00:00:00 2001 From: zhangduo Date: Sun, 21 Dec 2014 16:40:35 +0800 Subject: [PATCH] HBASE-12405 WAL accounting by Store --- .../java/org/apache/hadoop/hbase/RegionLoad.java | 10 + .../hadoop/hbase/protobuf/ResponseConverter.java | 7 +- .../org/apache/hadoop/hbase/zookeeper/ZKUtil.java | 13 +- .../protobuf/generated/ClusterStatusProtos.java | 1981 +++++++++++++++++++- .../generated/RegionServerStatusProtos.java | 569 +++++- .../protobuf/generated/VisibilityLabelsProtos.java | 6 +- .../hbase/protobuf/generated/ZooKeeperProtos.java | 1417 +------------- .../src/main/protobuf/ClusterStatus.proto | 20 + .../src/main/protobuf/RegionServerStatus.proto | 5 +- hbase-protocol/src/main/protobuf/ZooKeeper.proto | 17 - .../coordination/SplitLogWorkerCoordination.java | 4 +- .../ZKSplitLogManagerCoordination.java | 7 +- .../coordination/ZkSplitLogWorkerCoordination.java | 6 +- .../hadoop/hbase/master/MasterRpcServices.java | 10 +- .../apache/hadoop/hbase/master/ServerManager.java | 71 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 38 +- .../hadoop/hbase/regionserver/HRegionServer.java | 32 +- .../hadoop/hbase/regionserver/LastSequenceId.java | 5 +- .../hadoop/hbase/regionserver/wal/FSHLog.java | 2 +- .../hadoop/hbase/regionserver/wal/FSWALEntry.java | 30 +- .../org/apache/hadoop/hbase/wal/WALSplitter.java | 79 +- .../apache/hadoop/hbase/zookeeper/ZKSplitLog.java | 4 +- .../hbase/master/TestGetLastFlushedSequenceId.java | 19 +- .../regionserver/TestPerColumnFamilyFlush.java | 43 +- 24 files changed, 2752 insertions(+), 1643 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java index 234c5ae..a2e54a1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java @@ -20,9 +20,12 @@ package org.apache.hadoop.hbase; +import java.util.List; + import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Strings; @@ -154,6 +157,13 @@ public class RegionLoad { } /** + * @return completed sequence id per store. + */ + public List getStoreCompleteSequenceId() { + return regionLoadPB.getStoreCompleteSequenceIdList(); + } + + /** * @return the uncompressed size of the storefiles in MB. 
*/ public int getStoreUncompressedSizeMB() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java index 1d42a82..a98da7a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse; @@ -300,8 +301,10 @@ public final class ResponseConverter { * @return A GetLastFlushedSequenceIdResponse */ public static GetLastFlushedSequenceIdResponse buildGetLastFlushedSequenceIdResponse( - long seqId) { - return GetLastFlushedSequenceIdResponse.newBuilder().setLastFlushedSequenceId(seqId).build(); + RegionStoreSequenceIds ids) { + return GetLastFlushedSequenceIdResponse.newBuilder() + .setLastFlushedSequenceId(ids.getLastFlushedSequenceId()) + .addAllStoreLastFlushedSequenceId(ids.getStoreSequenceIdList()).build(); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index da0d8b2..82e6f8d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -44,8 +44,9 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; @@ -1929,10 +1930,10 @@ public class ZKUtil { */ public static byte[] regionSequenceIdsToByteArray(final Long regionLastFlushedSequenceId, final Map storeSequenceIds) { - ZooKeeperProtos.RegionStoreSequenceIds.Builder regionSequenceIdsBuilder = - ZooKeeperProtos.RegionStoreSequenceIds.newBuilder(); - ZooKeeperProtos.StoreSequenceId.Builder storeSequenceIdBuilder = - ZooKeeperProtos.StoreSequenceId.newBuilder(); + ClusterStatusProtos.RegionStoreSequenceIds.Builder regionSequenceIdsBuilder = + ClusterStatusProtos.RegionStoreSequenceIds.newBuilder(); + ClusterStatusProtos.StoreSequenceId.Builder storeSequenceIdBuilder = + ClusterStatusProtos.StoreSequenceId.newBuilder(); if (storeSequenceIds != null) { for (Map.Entry e : storeSequenceIds.entrySet()){ byte[] columnFamilyName = e.getKey(); @@ -1959,7 +1960,7 @@ public class ZKUtil { throw new 
DeserializationException("Unable to parse RegionStoreSequenceIds."); } RegionStoreSequenceIds.Builder regionSequenceIdsBuilder = - ZooKeeperProtos.RegionStoreSequenceIds.newBuilder(); + ClusterStatusProtos.RegionStoreSequenceIds.newBuilder(); int pblen = ProtobufUtil.lengthOfPBMagic(); RegionStoreSequenceIds storeIds = null; try { diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java index 5bc44ff..2d0aef0 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java @@ -1865,6 +1865,1393 @@ public final class ClusterStatusProtos { // @@protoc_insertion_point(class_scope:RegionInTransition) } + public interface StoreSequenceIdOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes family_name = 1; + /** + * required bytes family_name = 1; + */ + boolean hasFamilyName(); + /** + * required bytes family_name = 1; + */ + com.google.protobuf.ByteString getFamilyName(); + + // required uint64 sequence_id = 2; + /** + * required uint64 sequence_id = 2; + */ + boolean hasSequenceId(); + /** + * required uint64 sequence_id = 2; + */ + long getSequenceId(); + } + /** + * Protobuf type {@code StoreSequenceId} + * + *
+   **
+   * sequence Id of a store
+   * 
+ */ + public static final class StoreSequenceId extends + com.google.protobuf.GeneratedMessage + implements StoreSequenceIdOrBuilder { + // Use StoreSequenceId.newBuilder() to construct. + private StoreSequenceId(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StoreSequenceId(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StoreSequenceId defaultInstance; + public static StoreSequenceId getDefaultInstance() { + return defaultInstance; + } + + public StoreSequenceId getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StoreSequenceId( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + familyName_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + sequenceId_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StoreSequenceId parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StoreSequenceId(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bytes family_name = 1; + public static final int FAMILY_NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString familyName_; + /** + * required bytes family_name = 1; + */ + public boolean hasFamilyName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes family_name = 1; + */ + public 
com.google.protobuf.ByteString getFamilyName() { + return familyName_; + } + + // required uint64 sequence_id = 2; + public static final int SEQUENCE_ID_FIELD_NUMBER = 2; + private long sequenceId_; + /** + * required uint64 sequence_id = 2; + */ + public boolean hasSequenceId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 sequence_id = 2; + */ + public long getSequenceId() { + return sequenceId_; + } + + private void initFields() { + familyName_ = com.google.protobuf.ByteString.EMPTY; + sequenceId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasFamilyName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSequenceId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, familyName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, sequenceId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, familyName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, sequenceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId) obj; + + boolean result = true; + result = result && (hasFamilyName() == other.hasFamilyName()); + if (hasFamilyName()) { + result = result && getFamilyName() + .equals(other.getFamilyName()); + } + result = result && (hasSequenceId() == other.hasSequenceId()); + if (hasSequenceId()) { + result = result && (getSequenceId() + == other.getSequenceId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasFamilyName()) { + hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getFamilyName().hashCode(); + } + if (hasSequenceId()) { + hash = (37 * hash) + SEQUENCE_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getSequenceId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code StoreSequenceId} + * + *
+     **
+     * sequence Id of a store
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + familyName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + sequenceId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.familyName_ = familyName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.sequenceId_ = sequenceId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId other) { + if 
(other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()) return this; + if (other.hasFamilyName()) { + setFamilyName(other.getFamilyName()); + } + if (other.hasSequenceId()) { + setSequenceId(other.getSequenceId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasFamilyName()) { + + return false; + } + if (!hasSequenceId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes family_name = 1; + private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes family_name = 1; + */ + public boolean hasFamilyName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes family_name = 1; + */ + public com.google.protobuf.ByteString getFamilyName() { + return familyName_; + } + /** + * required bytes family_name = 1; + */ + public Builder setFamilyName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + familyName_ = value; + onChanged(); + return this; + } + /** + * required bytes family_name = 1; + */ + public Builder clearFamilyName() { + bitField0_ = (bitField0_ & ~0x00000001); + familyName_ = getDefaultInstance().getFamilyName(); + onChanged(); + return this; + } + + // required uint64 sequence_id = 2; + private long sequenceId_ ; + /** + * required uint64 sequence_id = 2; + */ + public boolean hasSequenceId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 sequence_id = 2; + */ + public long getSequenceId() { + return sequenceId_; + } + /** + * required uint64 sequence_id = 2; + */ + public Builder setSequenceId(long value) { + bitField0_ |= 0x00000002; + sequenceId_ = value; + onChanged(); + return this; + } + /** + * required uint64 sequence_id = 2; + */ + public Builder clearSequenceId() { + bitField0_ = (bitField0_ & ~0x00000002); + sequenceId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:StoreSequenceId) + } + + static { + defaultInstance = new StoreSequenceId(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:StoreSequenceId) + } + + public interface RegionStoreSequenceIdsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 last_flushed_sequence_id = 1; + /** + * required uint64 last_flushed_sequence_id = 1; + */ + boolean hasLastFlushedSequenceId(); + /** + * required uint64 last_flushed_sequence_id = 1; + */ + long getLastFlushedSequenceId(); + + // repeated .StoreSequenceId store_sequence_id = 2; + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + java.util.List + getStoreSequenceIdList(); + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreSequenceId(int index); + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + int getStoreSequenceIdCount(); + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> + getStoreSequenceIdOrBuilderList(); + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( + int index); + } + /** + * Protobuf type {@code RegionStoreSequenceIds} + * + *
+   **
+   * contains the sequence id of a region, which should be the minimum of its store sequence ids,
+   * and a list of the sequence ids of the region's stores
+   * 
+ */ + public static final class RegionStoreSequenceIds extends + com.google.protobuf.GeneratedMessage + implements RegionStoreSequenceIdsOrBuilder { + // Use RegionStoreSequenceIds.newBuilder() to construct. + private RegionStoreSequenceIds(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RegionStoreSequenceIds(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RegionStoreSequenceIds defaultInstance; + public static RegionStoreSequenceIds getDefaultInstance() { + return defaultInstance; + } + + public RegionStoreSequenceIds getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionStoreSequenceIds( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + lastFlushedSequenceId_ = input.readUInt64(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + storeSequenceId_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + storeSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegionStoreSequenceIds parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegionStoreSequenceIds(input, 
extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint64 last_flushed_sequence_id = 1; + public static final int LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER = 1; + private long lastFlushedSequenceId_; + /** + * required uint64 last_flushed_sequence_id = 1; + */ + public boolean hasLastFlushedSequenceId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 last_flushed_sequence_id = 1; + */ + public long getLastFlushedSequenceId() { + return lastFlushedSequenceId_; + } + + // repeated .StoreSequenceId store_sequence_id = 2; + public static final int STORE_SEQUENCE_ID_FIELD_NUMBER = 2; + private java.util.List storeSequenceId_; + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public java.util.List getStoreSequenceIdList() { + return storeSequenceId_; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public java.util.List + getStoreSequenceIdOrBuilderList() { + return storeSequenceId_; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public int getStoreSequenceIdCount() { + return storeSequenceId_.size(); + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreSequenceId(int index) { + return storeSequenceId_.get(index); + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( + int index) { + return storeSequenceId_.get(index); + } + + private void initFields() { + lastFlushedSequenceId_ = 0L; + storeSequenceId_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasLastFlushedSequenceId()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getStoreSequenceIdCount(); i++) { + if (!getStoreSequenceId(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, lastFlushedSequenceId_); + } + for (int i = 0; i < storeSequenceId_.size(); i++) { + output.writeMessage(2, storeSequenceId_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, lastFlushedSequenceId_); + } + for (int i = 0; i < storeSequenceId_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, storeSequenceId_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + 
return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) obj; + + boolean result = true; + result = result && (hasLastFlushedSequenceId() == other.hasLastFlushedSequenceId()); + if (hasLastFlushedSequenceId()) { + result = result && (getLastFlushedSequenceId() + == other.getLastFlushedSequenceId()); + } + result = result && getStoreSequenceIdList() + .equals(other.getStoreSequenceIdList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasLastFlushedSequenceId()) { + hash = (37 * hash) + LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLastFlushedSequenceId()); + } + if (getStoreSequenceIdCount() > 0) { + hash = (37 * hash) + STORE_SEQUENCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getStoreSequenceIdList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code RegionStoreSequenceIds} + * + *
+     **
+     * contains the sequence id of a region, which should be the minimum of its store sequence ids,
+     * and a list of the sequence ids of the region's stores
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIdsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getStoreSequenceIdFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + lastFlushedSequenceId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + if (storeSequenceIdBuilder_ == null) { + storeSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + storeSequenceIdBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.lastFlushedSequenceId_ = lastFlushedSequenceId_; + if (storeSequenceIdBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.storeSequenceId_ = storeSequenceId_; + } else { + result.storeSequenceId_ = storeSequenceIdBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return 
result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.getDefaultInstance()) return this; + if (other.hasLastFlushedSequenceId()) { + setLastFlushedSequenceId(other.getLastFlushedSequenceId()); + } + if (storeSequenceIdBuilder_ == null) { + if (!other.storeSequenceId_.isEmpty()) { + if (storeSequenceId_.isEmpty()) { + storeSequenceId_ = other.storeSequenceId_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.addAll(other.storeSequenceId_); + } + onChanged(); + } + } else { + if (!other.storeSequenceId_.isEmpty()) { + if (storeSequenceIdBuilder_.isEmpty()) { + storeSequenceIdBuilder_.dispose(); + storeSequenceIdBuilder_ = null; + storeSequenceId_ = other.storeSequenceId_; + bitField0_ = (bitField0_ & ~0x00000002); + storeSequenceIdBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getStoreSequenceIdFieldBuilder() : null; + } else { + storeSequenceIdBuilder_.addAllMessages(other.storeSequenceId_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasLastFlushedSequenceId()) { + + return false; + } + for (int i = 0; i < getStoreSequenceIdCount(); i++) { + if (!getStoreSequenceId(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint64 last_flushed_sequence_id = 1; + private long lastFlushedSequenceId_ ; + /** + * required uint64 last_flushed_sequence_id = 1; + */ + public boolean hasLastFlushedSequenceId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 last_flushed_sequence_id = 1; + */ + public long getLastFlushedSequenceId() { + return lastFlushedSequenceId_; + } + /** + * required uint64 last_flushed_sequence_id = 1; + */ + public Builder setLastFlushedSequenceId(long value) { + bitField0_ |= 0x00000001; + lastFlushedSequenceId_ = value; + onChanged(); + return this; + } + /** + * required uint64 last_flushed_sequence_id = 1; + */ + public Builder clearLastFlushedSequenceId() { + bitField0_ = (bitField0_ & ~0x00000001); + lastFlushedSequenceId_ = 0L; + onChanged(); + return this; + } + + // repeated .StoreSequenceId store_sequence_id = 2; + private java.util.List storeSequenceId_ = + java.util.Collections.emptyList(); + private void 
ensureStoreSequenceIdIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + storeSequenceId_ = new java.util.ArrayList(storeSequenceId_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> storeSequenceIdBuilder_; + + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public java.util.List getStoreSequenceIdList() { + if (storeSequenceIdBuilder_ == null) { + return java.util.Collections.unmodifiableList(storeSequenceId_); + } else { + return storeSequenceIdBuilder_.getMessageList(); + } + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public int getStoreSequenceIdCount() { + if (storeSequenceIdBuilder_ == null) { + return storeSequenceId_.size(); + } else { + return storeSequenceIdBuilder_.getCount(); + } + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreSequenceId(int index) { + if (storeSequenceIdBuilder_ == null) { + return storeSequenceId_.get(index); + } else { + return storeSequenceIdBuilder_.getMessage(index); + } + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder setStoreSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.set(index, value); + onChanged(); + } else { + storeSequenceIdBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder setStoreSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.set(index, builderForValue.build()); + onChanged(); + } else { + storeSequenceIdBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder addStoreSequenceId(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.add(value); + onChanged(); + } else { + storeSequenceIdBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder addStoreSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.add(index, value); + onChanged(); + } else { + storeSequenceIdBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder addStoreSequenceId( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeSequenceIdBuilder_ == null) { + 
ensureStoreSequenceIdIsMutable(); + storeSequenceId_.add(builderForValue.build()); + onChanged(); + } else { + storeSequenceIdBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder addStoreSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.add(index, builderForValue.build()); + onChanged(); + } else { + storeSequenceIdBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder addAllStoreSequenceId( + java.lang.Iterable values) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + super.addAll(values, storeSequenceId_); + onChanged(); + } else { + storeSequenceIdBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder clearStoreSequenceId() { + if (storeSequenceIdBuilder_ == null) { + storeSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + storeSequenceIdBuilder_.clear(); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder removeStoreSequenceId(int index) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.remove(index); + onChanged(); + } else { + storeSequenceIdBuilder_.remove(index); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder getStoreSequenceIdBuilder( + int index) { + return getStoreSequenceIdFieldBuilder().getBuilder(index); + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( + int index) { + if (storeSequenceIdBuilder_ == null) { + return storeSequenceId_.get(index); } else { + return storeSequenceIdBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public java.util.List + getStoreSequenceIdOrBuilderList() { + if (storeSequenceIdBuilder_ != null) { + return storeSequenceIdBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(storeSequenceId_); + } + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder() { + return getStoreSequenceIdFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()); + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder( + int index) { + return getStoreSequenceIdFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()); + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public java.util.List + getStoreSequenceIdBuilderList() { + return getStoreSequenceIdFieldBuilder().getBuilderList(); + } + private 
com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> + getStoreSequenceIdFieldBuilder() { + if (storeSequenceIdBuilder_ == null) { + storeSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>( + storeSequenceId_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + storeSequenceId_ = null; + } + return storeSequenceIdBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RegionStoreSequenceIds) + } + + static { + defaultInstance = new RegionStoreSequenceIds(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegionStoreSequenceIds) + } + public interface RegionLoadOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -2171,6 +3558,51 @@ public final class ClusterStatusProtos { * */ float getDataLocality(); + + // repeated .StoreSequenceId store_complete_sequence_id = 17; + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+     ** the most recent sequence Id of the store from cache flush
+     * 
+ */ + java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> + getStoreCompleteSequenceIdList(); + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+     ** the most recent sequence Id of the store from cache flush
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreCompleteSequenceId(int index); + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+     ** the most recent sequence Id of the store from cache flush
+     * 
+ */ + int getStoreCompleteSequenceIdCount(); + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+     ** the most recent sequence Id of the store from cache flush
+     * 
+ */ + java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> + getStoreCompleteSequenceIdOrBuilderList(); + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+     ** the most recent sequence Id of the store from cache flush
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder( + int index); } /** * Protobuf type {@code RegionLoad} @@ -2311,6 +3743,14 @@ public final class ClusterStatusProtos { dataLocality_ = input.readFloat(); break; } + case 138: { + if (!((mutable_bitField0_ & 0x00010000) == 0x00010000)) { + storeCompleteSequenceId_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00010000; + } + storeCompleteSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.PARSER, extensionRegistry)); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -2319,6 +3759,9 @@ public final class ClusterStatusProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00010000) == 0x00010000)) { + storeCompleteSequenceId_ = java.util.Collections.unmodifiableList(storeCompleteSequenceId_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -2753,6 +4196,62 @@ public final class ClusterStatusProtos { return dataLocality_; } + // repeated .StoreSequenceId store_complete_sequence_id = 17; + public static final int STORE_COMPLETE_SEQUENCE_ID_FIELD_NUMBER = 17; + private java.util.List storeCompleteSequenceId_; + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+     ** the most recent sequence Id of the store from cache flush
+     * 
+ */ + public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> getStoreCompleteSequenceIdList() { + return storeCompleteSequenceId_; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+     ** the most recent sequence Id of the store from cache flush
+     * 
+ */ + public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> + getStoreCompleteSequenceIdOrBuilderList() { + return storeCompleteSequenceId_; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+     ** the most recent sequence Id of the store from cache flush
+     * 
+ */ + public int getStoreCompleteSequenceIdCount() { + return storeCompleteSequenceId_.size(); + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+     ** the most recent sequence Id of the store from cache flush
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreCompleteSequenceId(int index) { + return storeCompleteSequenceId_.get(index); + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+     ** the most recent sequence Id of store from cache flush 
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder( + int index) { + return storeCompleteSequenceId_.get(index); + } + private void initFields() { regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); stores_ = 0; @@ -2770,6 +4269,7 @@ public final class ClusterStatusProtos { totalStaticBloomSizeKB_ = 0; completeSequenceId_ = 0L; dataLocality_ = 0F; + storeCompleteSequenceId_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -2784,6 +4284,12 @@ public final class ClusterStatusProtos { memoizedIsInitialized = 0; return false; } + for (int i = 0; i < getStoreCompleteSequenceIdCount(); i++) { + if (!getStoreCompleteSequenceId(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -2839,6 +4345,9 @@ public final class ClusterStatusProtos { if (((bitField0_ & 0x00008000) == 0x00008000)) { output.writeFloat(16, dataLocality_); } + for (int i = 0; i < storeCompleteSequenceId_.size(); i++) { + output.writeMessage(17, storeCompleteSequenceId_.get(i)); + } getUnknownFields().writeTo(output); } @@ -2912,6 +4421,10 @@ public final class ClusterStatusProtos { size += com.google.protobuf.CodedOutputStream .computeFloatSize(16, dataLocality_); } + for (int i = 0; i < storeCompleteSequenceId_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(17, storeCompleteSequenceId_.get(i)); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -3014,6 +4527,8 @@ public final class ClusterStatusProtos { if (hasDataLocality()) { result = result && (Float.floatToIntBits(getDataLocality()) == Float.floatToIntBits(other.getDataLocality())); } + result = result && getStoreCompleteSequenceIdList() + .equals(other.getStoreCompleteSequenceIdList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -3092,6 +4607,10 @@ public final class ClusterStatusProtos { hash = (53 * hash) + Float.floatToIntBits( getDataLocality()); } + if (getStoreCompleteSequenceIdCount() > 0) { + hash = (37 * hash) + STORE_COMPLETE_SEQUENCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getStoreCompleteSequenceIdList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -3194,6 +4713,7 @@ public final class ClusterStatusProtos { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getRegionSpecifierFieldBuilder(); + getStoreCompleteSequenceIdFieldBuilder(); } } private static Builder create() { @@ -3238,6 +4758,12 @@ public final class ClusterStatusProtos { bitField0_ = (bitField0_ & ~0x00004000); dataLocality_ = 0F; bitField0_ = (bitField0_ & ~0x00008000); + if (storeCompleteSequenceIdBuilder_ == null) { + storeCompleteSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00010000); + } else { + storeCompleteSequenceIdBuilder_.clear(); + } return this; } @@ -3334,6 +4860,15 @@ public final class ClusterStatusProtos { to_bitField0_ |= 0x00008000; } result.dataLocality_ = dataLocality_; + if (storeCompleteSequenceIdBuilder_ == null) { + if (((bitField0_ & 0x00010000) == 0x00010000)) { + storeCompleteSequenceId_ = java.util.Collections.unmodifiableList(storeCompleteSequenceId_); + bitField0_ = (bitField0_ & ~0x00010000); + 
} + result.storeCompleteSequenceId_ = storeCompleteSequenceId_; + } else { + result.storeCompleteSequenceId_ = storeCompleteSequenceIdBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -3398,6 +4933,32 @@ public final class ClusterStatusProtos { if (other.hasDataLocality()) { setDataLocality(other.getDataLocality()); } + if (storeCompleteSequenceIdBuilder_ == null) { + if (!other.storeCompleteSequenceId_.isEmpty()) { + if (storeCompleteSequenceId_.isEmpty()) { + storeCompleteSequenceId_ = other.storeCompleteSequenceId_; + bitField0_ = (bitField0_ & ~0x00010000); + } else { + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.addAll(other.storeCompleteSequenceId_); + } + onChanged(); + } + } else { + if (!other.storeCompleteSequenceId_.isEmpty()) { + if (storeCompleteSequenceIdBuilder_.isEmpty()) { + storeCompleteSequenceIdBuilder_.dispose(); + storeCompleteSequenceIdBuilder_ = null; + storeCompleteSequenceId_ = other.storeCompleteSequenceId_; + bitField0_ = (bitField0_ & ~0x00010000); + storeCompleteSequenceIdBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getStoreCompleteSequenceIdFieldBuilder() : null; + } else { + storeCompleteSequenceIdBuilder_.addAllMessages(other.storeCompleteSequenceId_); + } + } + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -3411,6 +4972,12 @@ public final class ClusterStatusProtos { return false; } + for (int i = 0; i < getStoreCompleteSequenceIdCount(); i++) { + if (!getStoreCompleteSequenceId(i).isInitialized()) { + + return false; + } + } return true; } @@ -4337,6 +5904,318 @@ public final class ClusterStatusProtos { return this; } + // repeated .StoreSequenceId store_complete_sequence_id = 17; + private java.util.List storeCompleteSequenceId_ = + java.util.Collections.emptyList(); + private void ensureStoreCompleteSequenceIdIsMutable() { + if (!((bitField0_ & 0x00010000) == 0x00010000)) { + storeCompleteSequenceId_ = new java.util.ArrayList(storeCompleteSequenceId_); + bitField0_ |= 0x00010000; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> storeCompleteSequenceIdBuilder_; + + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
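Before the builder surface below, a sketch of how a caller might consume the new repeated field from a parsed RegionLoad. Where the message comes from (for example, a server load report) is assumed, and the wrapper class and method names are illustrative:

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;

public class RegionLoadDump {
  // Prints the region-wide flushed sequence id plus the new per-store entries
  // added by this patch (one entry per column family).
  static void dumpStoreSequenceIds(RegionLoad load) {
    System.out.println("region complete_sequence_id=" + load.getCompleteSequenceId());
    for (StoreSequenceId id : load.getStoreCompleteSequenceIdList()) {
      System.out.println("  family=" + id.getFamilyName().toStringUtf8()
          + " sequence_id=" + id.getSequenceId());
    }
  }
}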
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public java.util.List getStoreCompleteSequenceIdList() { + if (storeCompleteSequenceIdBuilder_ == null) { + return java.util.Collections.unmodifiableList(storeCompleteSequenceId_); + } else { + return storeCompleteSequenceIdBuilder_.getMessageList(); + } + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public int getStoreCompleteSequenceIdCount() { + if (storeCompleteSequenceIdBuilder_ == null) { + return storeCompleteSequenceId_.size(); + } else { + return storeCompleteSequenceIdBuilder_.getCount(); + } + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreCompleteSequenceId(int index) { + if (storeCompleteSequenceIdBuilder_ == null) { + return storeCompleteSequenceId_.get(index); + } else { + return storeCompleteSequenceIdBuilder_.getMessage(index); + } + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public Builder setStoreCompleteSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeCompleteSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.set(index, value); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public Builder setStoreCompleteSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeCompleteSequenceIdBuilder_ == null) { + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.set(index, builderForValue.build()); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public Builder addStoreCompleteSequenceId(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeCompleteSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.add(value); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public Builder addStoreCompleteSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeCompleteSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.add(index, value); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public Builder addStoreCompleteSequenceId( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeCompleteSequenceIdBuilder_ == null) { + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.add(builderForValue.build()); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public Builder addStoreCompleteSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeCompleteSequenceIdBuilder_ == null) { + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.add(index, builderForValue.build()); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public Builder addAllStoreCompleteSequenceId( + java.lang.Iterable values) { + if (storeCompleteSequenceIdBuilder_ == null) { + ensureStoreCompleteSequenceIdIsMutable(); + super.addAll(values, storeCompleteSequenceId_); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
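A sketch of populating the field through the add methods above, assuming a caller-supplied map of family name to flushed sequence id; the map and the wrapper method are illustrative, not something the patch defines:

import java.util.Map;
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;

public class RegionLoadExample {
  static RegionLoad.Builder addStoreSequenceIds(RegionLoad.Builder builder,
      Map<byte[], Long> familyToSeqId) {
    // Note: RegionLoad's required region_specifier must still be set
    // elsewhere before build() is called on the returned builder.
    for (Map.Entry<byte[], Long> e : familyToSeqId.entrySet()) {
      builder.addStoreCompleteSequenceId(StoreSequenceId.newBuilder()
          .setFamilyName(ByteString.copyFrom(e.getKey()))
          .setSequenceId(e.getValue().longValue())
          .build());
    }
    return builder;
  }
}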
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public Builder clearStoreCompleteSequenceId() { + if (storeCompleteSequenceIdBuilder_ == null) { + storeCompleteSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00010000); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.clear(); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public Builder removeStoreCompleteSequenceId(int index) { + if (storeCompleteSequenceIdBuilder_ == null) { + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.remove(index); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.remove(index); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder getStoreCompleteSequenceIdBuilder( + int index) { + return getStoreCompleteSequenceIdFieldBuilder().getBuilder(index); + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder( + int index) { + if (storeCompleteSequenceIdBuilder_ == null) { + return storeCompleteSequenceId_.get(index); } else { + return storeCompleteSequenceIdBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public java.util.List + getStoreCompleteSequenceIdOrBuilderList() { + if (storeCompleteSequenceIdBuilder_ != null) { + return storeCompleteSequenceIdBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(storeCompleteSequenceId_); + } + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreCompleteSequenceIdBuilder() { + return getStoreCompleteSequenceIdFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()); + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreCompleteSequenceIdBuilder( + int index) { + return getStoreCompleteSequenceIdFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()); + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 17; + * + *
+       ** the most recent sequence Id of store from cache flush 
+       * 
+ */ + public java.util.List + getStoreCompleteSequenceIdBuilderList() { + return getStoreCompleteSequenceIdFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> + getStoreCompleteSequenceIdFieldBuilder() { + if (storeCompleteSequenceIdBuilder_ == null) { + storeCompleteSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>( + storeCompleteSequenceId_, + ((bitField0_ & 0x00010000) == 0x00010000), + getParentForChildren(), + isClean()); + storeCompleteSequenceId_ = null; + } + return storeCompleteSequenceIdBuilder_; + } + // @@protoc_insertion_point(builder_scope:RegionLoad) } @@ -10432,6 +12311,16 @@ public final class ClusterStatusProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionInTransition_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_StoreSequenceId_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_StoreSequenceId_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegionStoreSequenceIds_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegionStoreSequenceIds_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_RegionLoad_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -10472,38 +12361,44 @@ public final class ClusterStatusProtos { "PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio", "nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" + "ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" + - "e\"\347\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" + - "(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" + - "storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" + - "ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" + - "\030\n\020memstore_size_MB\030\006 \001(\r\022\037\n\027storefile_i" + - "ndex_size_MB\030\007 \001(\r\022\033\n\023read_requests_coun" + - "t\030\010 \001(\004\022\034\n\024write_requests_count\030\t \001(\004\022\034\n" + - "\024total_compacting_KVs\030\n \001(\004\022\035\n\025current_c", - "ompacted_KVs\030\013 \001(\004\022\032\n\022root_index_size_KB" + - "\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" + - "(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" + - "\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rdata_loc" + - "ality\030\020 \001(\002\"\212\002\n\nServerLoad\022\032\n\022number_of_" + - "requests\030\001 \001(\r\022 \n\030total_number_of_reques" + - "ts\030\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_he" + - "ap_MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.Regi" + - "onLoad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocess" + - "or\022\031\n\021report_start_time\030\007 
\001(\004\022\027\n\017report_", - "end_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r" + - "\"O\n\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Ser" + - "verName\022 \n\013server_load\030\002 \002(\0132\013.ServerLoa" + - "d\"\340\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001" + - "(\0132\030.HBaseVersionFileContent\022%\n\014live_ser" + - "vers\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_serv" + - "ers\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_tra" + - "nsition\030\004 \003(\0132\023.RegionInTransition\022\036\n\ncl" + - "uster_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_cop" + - "rocessors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030", - "\007 \001(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003" + - "(\0132\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*" + - "org.apache.hadoop.hbase.protobuf.generat" + - "edB\023ClusterStatusProtosH\001\240\001\001" + "e\";\n\017StoreSequenceId\022\023\n\013family_name\030\001 \002(" + + "\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026RegionStoreSeq" + + "uenceIds\022 \n\030last_flushed_sequence_id\030\001 \002" + + "(\004\022+\n\021store_sequence_id\030\002 \003(\0132\020.StoreSeq" + + "uenceId\"\235\004\n\nRegionLoad\022*\n\020region_specifi" + + "er\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001" + + "(\r\022\022\n\nstorefiles\030\003 \001(\r\022\"\n\032store_uncompre" + + "ssed_size_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030", + "\005 \001(\r\022\030\n\020memstore_size_MB\030\006 \001(\r\022\037\n\027store" + + "file_index_size_MB\030\007 \001(\r\022\033\n\023read_request" + + "s_count\030\010 \001(\004\022\034\n\024write_requests_count\030\t " + + "\001(\004\022\034\n\024total_compacting_KVs\030\n \001(\004\022\035\n\025cur" + + "rent_compacted_KVs\030\013 \001(\004\022\032\n\022root_index_s" + + "ize_KB\030\014 \001(\r\022\"\n\032total_static_index_size_" + + "KB\030\r \001(\r\022\"\n\032total_static_bloom_size_KB\030\016" + + " \001(\r\022\034\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rda" + + "ta_locality\030\020 \001(\002\0224\n\032store_complete_sequ" + + "ence_id\030\021 \003(\0132\020.StoreSequenceId\"\212\002\n\nServ", + "erLoad\022\032\n\022number_of_requests\030\001 \001(\r\022 \n\030to" + + "tal_number_of_requests\030\002 \001(\r\022\024\n\014used_hea" + + "p_MB\030\003 \001(\r\022\023\n\013max_heap_MB\030\004 \001(\r\022!\n\014regio" + + "n_loads\030\005 \003(\0132\013.RegionLoad\022\"\n\014coprocesso" + + "rs\030\006 \003(\0132\014.Coprocessor\022\031\n\021report_start_t" + + "ime\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001(\004\022\030\n\020in" + + "fo_server_port\030\t \001(\r\"O\n\016LiveServerInfo\022\033" + + "\n\006server\030\001 \002(\0132\013.ServerName\022 \n\013server_lo" + + "ad\030\002 \002(\0132\013.ServerLoad\"\340\002\n\rClusterStatus\022" + + "/\n\rhbase_version\030\001 \001(\0132\030.HBaseVersionFil", + "eContent\022%\n\014live_servers\030\002 \003(\0132\017.LiveSer" + + "verInfo\022!\n\014dead_servers\030\003 \003(\0132\013.ServerNa" + + "me\0222\n\025regions_in_transition\030\004 \003(\0132\023.Regi" + + "onInTransition\022\036\n\ncluster_id\030\005 \001(\0132\n.Clu" + + "sterId\022)\n\023master_coprocessors\030\006 \003(\0132\014.Co" + + "processor\022\033\n\006master\030\007 \001(\0132\013.ServerName\022#" + + "\n\016backup_masters\030\010 \003(\0132\013.ServerName\022\023\n\013b" + + "alancer_on\030\t 
\001(\010BF\n*org.apache.hadoop.hb" + + "ase.protobuf.generatedB\023ClusterStatusPro" + + "tosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -10522,26 +12417,38 @@ public final class ClusterStatusProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionInTransition_descriptor, new java.lang.String[] { "Spec", "RegionState", }); - internal_static_RegionLoad_descriptor = + internal_static_StoreSequenceId_descriptor = getDescriptor().getMessageTypes().get(2); + internal_static_StoreSequenceId_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_StoreSequenceId_descriptor, + new java.lang.String[] { "FamilyName", "SequenceId", }); + internal_static_RegionStoreSequenceIds_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_RegionStoreSequenceIds_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegionStoreSequenceIds_descriptor, + new java.lang.String[] { "LastFlushedSequenceId", "StoreSequenceId", }); + internal_static_RegionLoad_descriptor = + getDescriptor().getMessageTypes().get(4); internal_static_RegionLoad_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionLoad_descriptor, - new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", }); + new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "StoreCompleteSequenceId", }); internal_static_ServerLoad_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(5); internal_static_ServerLoad_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ServerLoad_descriptor, new java.lang.String[] { "NumberOfRequests", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", "ReportStartTime", "ReportEndTime", "InfoServerPort", }); internal_static_LiveServerInfo_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(6); internal_static_LiveServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_LiveServerInfo_descriptor, new java.lang.String[] { "Server", "ServerLoad", }); internal_static_ClusterStatus_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(7); internal_static_ClusterStatus_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ClusterStatus_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java index ec169d5..1a4e1a5 100644 --- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java @@ -4496,7 +4496,7 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
-     * the last WAL sequence id flushed from MemStore to HFile for the region 
+     ** the last WAL sequence id flushed from MemStore to HFile for the region 
      * 
*/ boolean hasLastFlushedSequenceId(); @@ -4504,10 +4504,55 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
-     * the last WAL sequence id flushed from MemStore to HFile for the region 
+     ** the last WAL sequence id flushed from MemStore to HFile for the region 
      * 
*/ long getLastFlushedSequenceId(); + + // repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+     * 
+ */ + java.util.List + getStoreLastFlushedSequenceIdList(); + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreLastFlushedSequenceId(int index); + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+     * 
+ */ + int getStoreLastFlushedSequenceIdCount(); + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+     * 
+ */ + java.util.List + getStoreLastFlushedSequenceIdOrBuilderList(); + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreLastFlushedSequenceIdOrBuilder( + int index); } /** * Protobuf type {@code GetLastFlushedSequenceIdResponse} @@ -4565,6 +4610,14 @@ public final class RegionServerStatusProtos { lastFlushedSequenceId_ = input.readUInt64(); break; } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + storeLastFlushedSequenceId_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + storeLastFlushedSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.PARSER, extensionRegistry)); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -4573,6 +4626,9 @@ public final class RegionServerStatusProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + storeLastFlushedSequenceId_ = java.util.Collections.unmodifiableList(storeLastFlushedSequenceId_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -4612,7 +4668,7 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
-     * the last WAL sequence id flushed from MemStore to HFile for the region 
+     ** the last WAL sequence id flushed from MemStore to HFile for the region 
      * 
*/ public boolean hasLastFlushedSequenceId() { @@ -4622,15 +4678,72 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
-     * the last WAL sequence id flushed from MemStore to HFile for the region 
+     ** the last WAL sequence id flushed from MemStore to HFile for the region 
      * 
*/ public long getLastFlushedSequenceId() { return lastFlushedSequenceId_; } + // repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + public static final int STORE_LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER = 2; + private java.util.List storeLastFlushedSequenceId_; + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
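The response follows the same layout as RegionLoad: the repeated field is number 2, so the generated parser above dispatches on tag 18, that is `(2 << 3) | 2`. A sketch of unpacking the enriched response into a per-family lookup; the helper name is illustrative, while Bytes.BYTES_COMPARATOR is HBase's existing byte-array comparator:

import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushedSeqIdLookup {
  // family -> last flushed sequence id; callers can fall back to
  // resp.getLastFlushedSequenceId() for families with no entry.
  static Map<byte[], Long> toFamilyMap(GetLastFlushedSequenceIdResponse resp) {
    Map<byte[], Long> map = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
    for (StoreSequenceId id : resp.getStoreLastFlushedSequenceIdList()) {
      map.put(id.getFamilyName().toByteArray(), id.getSequenceId());
    }
    return map;
  }
}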
+     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+     * 
+ */ + public java.util.List getStoreLastFlushedSequenceIdList() { + return storeLastFlushedSequenceId_; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+     * 
+ */ + public java.util.List + getStoreLastFlushedSequenceIdOrBuilderList() { + return storeLastFlushedSequenceId_; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+     * 
+ */ + public int getStoreLastFlushedSequenceIdCount() { + return storeLastFlushedSequenceId_.size(); + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreLastFlushedSequenceId(int index) { + return storeLastFlushedSequenceId_.get(index); + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreLastFlushedSequenceIdOrBuilder( + int index) { + return storeLastFlushedSequenceId_.get(index); + } + private void initFields() { lastFlushedSequenceId_ = 0L; + storeLastFlushedSequenceId_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -4641,6 +4754,12 @@ public final class RegionServerStatusProtos { memoizedIsInitialized = 0; return false; } + for (int i = 0; i < getStoreLastFlushedSequenceIdCount(); i++) { + if (!getStoreLastFlushedSequenceId(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -4651,6 +4770,9 @@ public final class RegionServerStatusProtos { if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, lastFlushedSequenceId_); } + for (int i = 0; i < storeLastFlushedSequenceId_.size(); i++) { + output.writeMessage(2, storeLastFlushedSequenceId_.get(i)); + } getUnknownFields().writeTo(output); } @@ -4664,6 +4786,10 @@ public final class RegionServerStatusProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, lastFlushedSequenceId_); } + for (int i = 0; i < storeLastFlushedSequenceId_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, storeLastFlushedSequenceId_.get(i)); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -4692,6 +4818,8 @@ public final class RegionServerStatusProtos { result = result && (getLastFlushedSequenceId() == other.getLastFlushedSequenceId()); } + result = result && getStoreLastFlushedSequenceIdList() + .equals(other.getStoreLastFlushedSequenceIdList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -4709,6 +4837,10 @@ public final class RegionServerStatusProtos { hash = (37 * hash) + LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLastFlushedSequenceId()); } + if (getStoreLastFlushedSequenceIdCount() > 0) { + hash = (37 * hash) + STORE_LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getStoreLastFlushedSequenceIdList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -4810,6 +4942,7 @@ public final class RegionServerStatusProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getStoreLastFlushedSequenceIdFieldBuilder(); } } private static Builder create() { @@ -4820,6 +4953,12 @@ public final class RegionServerStatusProtos { super.clear(); lastFlushedSequenceId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); + if (storeLastFlushedSequenceIdBuilder_ == null) { + storeLastFlushedSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + storeLastFlushedSequenceIdBuilder_.clear(); + } return this; } @@ -4852,6 +4991,15 @@ public final class RegionServerStatusProtos { to_bitField0_ |= 0x00000001; } result.lastFlushedSequenceId_ = lastFlushedSequenceId_; + if (storeLastFlushedSequenceIdBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + storeLastFlushedSequenceId_ = java.util.Collections.unmodifiableList(storeLastFlushedSequenceId_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.storeLastFlushedSequenceId_ = storeLastFlushedSequenceId_; + } else { + result.storeLastFlushedSequenceId_ = 
storeLastFlushedSequenceIdBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -4871,6 +5019,32 @@ public final class RegionServerStatusProtos { if (other.hasLastFlushedSequenceId()) { setLastFlushedSequenceId(other.getLastFlushedSequenceId()); } + if (storeLastFlushedSequenceIdBuilder_ == null) { + if (!other.storeLastFlushedSequenceId_.isEmpty()) { + if (storeLastFlushedSequenceId_.isEmpty()) { + storeLastFlushedSequenceId_ = other.storeLastFlushedSequenceId_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.addAll(other.storeLastFlushedSequenceId_); + } + onChanged(); + } + } else { + if (!other.storeLastFlushedSequenceId_.isEmpty()) { + if (storeLastFlushedSequenceIdBuilder_.isEmpty()) { + storeLastFlushedSequenceIdBuilder_.dispose(); + storeLastFlushedSequenceIdBuilder_ = null; + storeLastFlushedSequenceId_ = other.storeLastFlushedSequenceId_; + bitField0_ = (bitField0_ & ~0x00000002); + storeLastFlushedSequenceIdBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getStoreLastFlushedSequenceIdFieldBuilder() : null; + } else { + storeLastFlushedSequenceIdBuilder_.addAllMessages(other.storeLastFlushedSequenceId_); + } + } + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -4880,6 +5054,12 @@ public final class RegionServerStatusProtos { return false; } + for (int i = 0; i < getStoreLastFlushedSequenceIdCount(); i++) { + if (!getStoreLastFlushedSequenceId(i).isInitialized()) { + + return false; + } + } return true; } @@ -4908,7 +5088,7 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
-       * the last WAL sequence id flushed from MemStore to HFile for the region 
+       ** the last WAL sequence id flushed from MemStore to HFile for the region 
        * 
*/ public boolean hasLastFlushedSequenceId() { @@ -4918,7 +5098,7 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
-       * the last WAL sequence id flushed from MemStore to HFile for the region 
+       ** the last WAL sequence id flushed from MemStore to HFile for the region 
        * 
*/ public long getLastFlushedSequenceId() { @@ -4928,7 +5108,7 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
-       * the last WAL sequence id flushed from MemStore to HFile for the region 
+       ** the last WAL sequence id flushed from MemStore to HFile for the region 
        * 
*/ public Builder setLastFlushedSequenceId(long value) { @@ -4941,7 +5121,7 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
-       * the last WAL sequence id flushed from MemStore to HFile for the region 
+       ** the last WAL sequence id flushed from MemStore to HFile for the region 
        * 
*/ public Builder clearLastFlushedSequenceId() { @@ -4951,6 +5131,318 @@ public final class RegionServerStatusProtos { return this; } + // repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + private java.util.List storeLastFlushedSequenceId_ = + java.util.Collections.emptyList(); + private void ensureStoreLastFlushedSequenceIdIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + storeLastFlushedSequenceId_ = new java.util.ArrayList(storeLastFlushedSequenceId_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> storeLastFlushedSequenceIdBuilder_; + + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public java.util.List getStoreLastFlushedSequenceIdList() { + if (storeLastFlushedSequenceIdBuilder_ == null) { + return java.util.Collections.unmodifiableList(storeLastFlushedSequenceId_); + } else { + return storeLastFlushedSequenceIdBuilder_.getMessageList(); + } + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public int getStoreLastFlushedSequenceIdCount() { + if (storeLastFlushedSequenceIdBuilder_ == null) { + return storeLastFlushedSequenceId_.size(); + } else { + return storeLastFlushedSequenceIdBuilder_.getCount(); + } + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreLastFlushedSequenceId(int index) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + return storeLastFlushedSequenceId_.get(index); + } else { + return storeLastFlushedSequenceIdBuilder_.getMessage(index); + } + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public Builder setStoreLastFlushedSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.set(index, value); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public Builder setStoreLastFlushedSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.set(index, builderForValue.build()); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public Builder addStoreLastFlushedSequenceId(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.add(value); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public Builder addStoreLastFlushedSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.add(index, value); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public Builder addStoreLastFlushedSequenceId( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.add(builderForValue.build()); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public Builder addStoreLastFlushedSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.add(index, builderForValue.build()); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public Builder addAllStoreLastFlushedSequenceId( + java.lang.Iterable values) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + ensureStoreLastFlushedSequenceIdIsMutable(); + super.addAll(values, storeLastFlushedSequenceId_); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
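addAllStoreLastFlushedSequenceId accepts any iterable of StoreSequenceId, so a server that already tracks per-store ids can attach them in one call. A sketch, with both inputs assumed to be computed elsewhere:

import java.util.List;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;

public class ResponseExample {
  static GetLastFlushedSequenceIdResponse build(long regionSeqId,
      List<StoreSequenceId> storeSeqIds) {
    return GetLastFlushedSequenceIdResponse.newBuilder()
        .setLastFlushedSequenceId(regionSeqId)          // required field 1
        .addAllStoreLastFlushedSequenceId(storeSeqIds)  // new repeated field 2
        .build();
  }
}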
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public Builder clearStoreLastFlushedSequenceId() { + if (storeLastFlushedSequenceIdBuilder_ == null) { + storeLastFlushedSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.clear(); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public Builder removeStoreLastFlushedSequenceId(int index) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.remove(index); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.remove(index); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder getStoreLastFlushedSequenceIdBuilder( + int index) { + return getStoreLastFlushedSequenceIdFieldBuilder().getBuilder(index); + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreLastFlushedSequenceIdOrBuilder( + int index) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + return storeLastFlushedSequenceId_.get(index); } else { + return storeLastFlushedSequenceIdBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public java.util.List + getStoreLastFlushedSequenceIdOrBuilderList() { + if (storeLastFlushedSequenceIdBuilder_ != null) { + return storeLastFlushedSequenceIdBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(storeLastFlushedSequenceId_); + } + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreLastFlushedSequenceIdBuilder() { + return getStoreLastFlushedSequenceIdFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()); + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreLastFlushedSequenceIdBuilder( + int index) { + return getStoreLastFlushedSequenceIdFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()); + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
+       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
+       * 
+ */ + public java.util.List + getStoreLastFlushedSequenceIdBuilderList() { + return getStoreLastFlushedSequenceIdFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> + getStoreLastFlushedSequenceIdFieldBuilder() { + if (storeLastFlushedSequenceIdBuilder_ == null) { + storeLastFlushedSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>( + storeLastFlushedSequenceId_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + storeLastFlushedSequenceId_ = null; + } + return storeLastFlushedSequenceIdBuilder_; + } + // @@protoc_insertion_point(builder_scope:GetLastFlushedSequenceIdResponse) } @@ -8410,36 +8902,37 @@ public final class RegionServerStatusProtos { "lErrorRequest\022\033\n\006server\030\001 \002(\0132\013.ServerNa", "me\022\025\n\rerror_message\030\002 \002(\t\"\034\n\032ReportRSFat" + "alErrorResponse\"6\n\037GetLastFlushedSequenc" + - "eIdRequest\022\023\n\013region_name\030\001 \002(\014\"D\n GetLa" + + "eIdRequest\022\023\n\013region_name\030\001 \002(\014\"~\n GetLa" + "stFlushedSequenceIdResponse\022 \n\030last_flus" + - "hed_sequence_id\030\001 \002(\004\"\322\002\n\025RegionStateTra" + - "nsition\022>\n\017transition_code\030\001 \002(\0162%.Regio" + - "nStateTransition.TransitionCode\022 \n\013regio" + - "n_info\030\002 \003(\0132\013.RegionInfo\022\024\n\014open_seq_nu" + - "m\030\003 \001(\004\"\300\001\n\016TransitionCode\022\n\n\006OPENED\020\000\022\017" + - "\n\013FAILED_OPEN\020\001\022\n\n\006CLOSED\020\002\022\022\n\016READY_TO_", - "SPLIT\020\003\022\022\n\016READY_TO_MERGE\020\004\022\016\n\nSPLIT_PON" + - "R\020\005\022\016\n\nMERGE_PONR\020\006\022\t\n\005SPLIT\020\007\022\n\n\006MERGED" + - "\020\010\022\022\n\016SPLIT_REVERTED\020\t\022\022\n\016MERGE_REVERTED" + - "\020\n\"m\n\"ReportRegionStateTransitionRequest" + - "\022\033\n\006server\030\001 \002(\0132\013.ServerName\022*\n\ntransit" + - "ion\030\002 \003(\0132\026.RegionStateTransition\"<\n#Rep" + - "ortRegionStateTransitionResponse\022\025\n\rerro" + - "r_message\030\001 \001(\t2\326\003\n\031RegionServerStatusSe" + - "rvice\022P\n\023RegionServerStartup\022\033.RegionSer" + - "verStartupRequest\032\034.RegionServerStartupR", - "esponse\022M\n\022RegionServerReport\022\032.RegionSe" + - "rverReportRequest\032\033.RegionServerReportRe" + - "sponse\022M\n\022ReportRSFatalError\022\032.ReportRSF" + - "atalErrorRequest\032\033.ReportRSFatalErrorRes" + - "ponse\022_\n\030GetLastFlushedSequenceId\022 .GetL" + - "astFlushedSequenceIdRequest\032!.GetLastFlu" + - "shedSequenceIdResponse\022h\n\033ReportRegionSt" + - "ateTransition\022#.ReportRegionStateTransit" + - "ionRequest\032$.ReportRegionStateTransition" + - "ResponseBN\n*org.apache.hadoop.hbase.prot", - "obuf.generatedB\030RegionServerStatusProtos" + - "H\001\210\001\001\240\001\001" + "hed_sequence_id\030\001 \002(\004\0228\n\036store_last_flus" + + "hed_sequence_id\030\002 \003(\0132\020.StoreSequenceId\"" + + 
"\322\002\n\025RegionStateTransition\022>\n\017transition_" + + "code\030\001 \002(\0162%.RegionStateTransition.Trans" + + "itionCode\022 \n\013region_info\030\002 \003(\0132\013.RegionI" + + "nfo\022\024\n\014open_seq_num\030\003 \001(\004\"\300\001\n\016Transition", + "Code\022\n\n\006OPENED\020\000\022\017\n\013FAILED_OPEN\020\001\022\n\n\006CLO" + + "SED\020\002\022\022\n\016READY_TO_SPLIT\020\003\022\022\n\016READY_TO_ME" + + "RGE\020\004\022\016\n\nSPLIT_PONR\020\005\022\016\n\nMERGE_PONR\020\006\022\t\n" + + "\005SPLIT\020\007\022\n\n\006MERGED\020\010\022\022\n\016SPLIT_REVERTED\020\t" + + "\022\022\n\016MERGE_REVERTED\020\n\"m\n\"ReportRegionStat" + + "eTransitionRequest\022\033\n\006server\030\001 \002(\0132\013.Ser" + + "verName\022*\n\ntransition\030\002 \003(\0132\026.RegionStat" + + "eTransition\"<\n#ReportRegionStateTransiti" + + "onResponse\022\025\n\rerror_message\030\001 \001(\t2\326\003\n\031Re" + + "gionServerStatusService\022P\n\023RegionServerS", + "tartup\022\033.RegionServerStartupRequest\032\034.Re" + + "gionServerStartupResponse\022M\n\022RegionServe" + + "rReport\022\032.RegionServerReportRequest\032\033.Re" + + "gionServerReportResponse\022M\n\022ReportRSFata" + + "lError\022\032.ReportRSFatalErrorRequest\032\033.Rep" + + "ortRSFatalErrorResponse\022_\n\030GetLastFlushe" + + "dSequenceId\022 .GetLastFlushedSequenceIdRe" + + "quest\032!.GetLastFlushedSequenceIdResponse" + + "\022h\n\033ReportRegionStateTransition\022#.Report" + + "RegionStateTransitionRequest\032$.ReportReg", + "ionStateTransitionResponseBN\n*org.apache" + + ".hadoop.hbase.protobuf.generatedB\030Region" + + "ServerStatusProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -8493,7 +8986,7 @@ public final class RegionServerStatusProtos { internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetLastFlushedSequenceIdResponse_descriptor, - new java.lang.String[] { "LastFlushedSequenceId", }); + new java.lang.String[] { "LastFlushedSequenceId", "StoreLastFlushedSequenceId", }); internal_static_RegionStateTransition_descriptor = getDescriptor().getMessageTypes().get(8); internal_static_RegionStateTransition_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/VisibilityLabelsProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/VisibilityLabelsProtos.java index 294772e..70593b0 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/VisibilityLabelsProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/VisibilityLabelsProtos.java @@ -5092,7 +5092,7 @@ public final class VisibilityLabelsProtos { if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { @@ -5108,7 +5108,7 @@ public final class VisibilityLabelsProtos { getRegexBytes() { java.lang.Object ref = regex_; if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); regex_ = b; @@ -5414,7 +5414,7 @@ public final class 
VisibilityLabelsProtos { getRegexBytes() { java.lang.Object ref = regex_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); regex_ = b; diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index 5a1fbf1..f15e980 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -8196,1393 +8196,6 @@ public final class ZooKeeperProtos { // @@protoc_insertion_point(class_scope:TableLock) } - public interface StoreSequenceIdOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bytes family_name = 1; - /** - * required bytes family_name = 1; - */ - boolean hasFamilyName(); - /** - * required bytes family_name = 1; - */ - com.google.protobuf.ByteString getFamilyName(); - - // required uint64 sequence_id = 2; - /** - * required uint64 sequence_id = 2; - */ - boolean hasSequenceId(); - /** - * required uint64 sequence_id = 2; - */ - long getSequenceId(); - } - /** - * Protobuf type {@code StoreSequenceId} - * - *
-   **
-   * sequence Id of a store
-   * 
- */ - public static final class StoreSequenceId extends - com.google.protobuf.GeneratedMessage - implements StoreSequenceIdOrBuilder { - // Use StoreSequenceId.newBuilder() to construct. - private StoreSequenceId(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private StoreSequenceId(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final StoreSequenceId defaultInstance; - public static StoreSequenceId getDefaultInstance() { - return defaultInstance; - } - - public StoreSequenceId getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private StoreSequenceId( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - familyName_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - sequenceId_ = input.readUInt64(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public StoreSequenceId parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new StoreSequenceId(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required bytes family_name = 1; - public static final int FAMILY_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString familyName_; - /** - * required bytes family_name = 1; - */ - public boolean hasFamilyName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bytes family_name = 1; - */ - public 
com.google.protobuf.ByteString getFamilyName() { - return familyName_; - } - - // required uint64 sequence_id = 2; - public static final int SEQUENCE_ID_FIELD_NUMBER = 2; - private long sequenceId_; - /** - * required uint64 sequence_id = 2; - */ - public boolean hasSequenceId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required uint64 sequence_id = 2; - */ - public long getSequenceId() { - return sequenceId_; - } - - private void initFields() { - familyName_ = com.google.protobuf.ByteString.EMPTY; - sequenceId_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasFamilyName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasSequenceId()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, familyName_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, sequenceId_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, familyName_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, sequenceId_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId) obj; - - boolean result = true; - result = result && (hasFamilyName() == other.hasFamilyName()); - if (hasFamilyName()) { - result = result && getFamilyName() - .equals(other.getFamilyName()); - } - result = result && (hasSequenceId() == other.hasSequenceId()); - if (hasSequenceId()) { - result = result && (getSequenceId() - == other.getSequenceId()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasFamilyName()) { - hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER; - hash = (53 * hash) + getFamilyName().hashCode(); - } - if (hasSequenceId()) { - hash = (37 * hash) + SEQUENCE_ID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getSequenceId()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code StoreSequenceId} - * - *
-     **
-     * sequence Id of a store
-     * </pre>
- */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - familyName_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - sequenceId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.familyName_ = familyName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.sequenceId_ = sequenceId_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId other) { - if (other == 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance()) return this; - if (other.hasFamilyName()) { - setFamilyName(other.getFamilyName()); - } - if (other.hasSequenceId()) { - setSequenceId(other.getSequenceId()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasFamilyName()) { - - return false; - } - if (!hasSequenceId()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required bytes family_name = 1; - private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes family_name = 1; - */ - public boolean hasFamilyName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bytes family_name = 1; - */ - public com.google.protobuf.ByteString getFamilyName() { - return familyName_; - } - /** - * required bytes family_name = 1; - */ - public Builder setFamilyName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - familyName_ = value; - onChanged(); - return this; - } - /** - * required bytes family_name = 1; - */ - public Builder clearFamilyName() { - bitField0_ = (bitField0_ & ~0x00000001); - familyName_ = getDefaultInstance().getFamilyName(); - onChanged(); - return this; - } - - // required uint64 sequence_id = 2; - private long sequenceId_ ; - /** - * required uint64 sequence_id = 2; - */ - public boolean hasSequenceId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required uint64 sequence_id = 2; - */ - public long getSequenceId() { - return sequenceId_; - } - /** - * required uint64 sequence_id = 2; - */ - public Builder setSequenceId(long value) { - bitField0_ |= 0x00000002; - sequenceId_ = value; - onChanged(); - return this; - } - /** - * required uint64 sequence_id = 2; - */ - public Builder clearSequenceId() { - bitField0_ = (bitField0_ & ~0x00000002); - sequenceId_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:StoreSequenceId) - } - - static { - defaultInstance = new StoreSequenceId(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:StoreSequenceId) - } - - public interface RegionStoreSequenceIdsOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint64 last_flushed_sequence_id = 1; - /** - * required uint64 last_flushed_sequence_id = 1; - */ - boolean hasLastFlushedSequenceId(); - /** - * required uint64 last_flushed_sequence_id = 1; - */ - long getLastFlushedSequenceId(); - - // repeated .StoreSequenceId store_sequence_id = 2; - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - java.util.List - getStoreSequenceIdList(); - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index); - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - int getStoreSequenceIdCount(); - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - java.util.List - getStoreSequenceIdOrBuilderList(); - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( - int index); - } - /** - * Protobuf type {@code RegionStoreSequenceIds} - * - *
-   **
-   * contains a sequence id of a region which should be the minimum of its store sequence ids and 
-   * list sequence ids of the region's stores
-   * </pre>
- */ - public static final class RegionStoreSequenceIds extends - com.google.protobuf.GeneratedMessage - implements RegionStoreSequenceIdsOrBuilder { - // Use RegionStoreSequenceIds.newBuilder() to construct. - private RegionStoreSequenceIds(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private RegionStoreSequenceIds(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final RegionStoreSequenceIds defaultInstance; - public static RegionStoreSequenceIds getDefaultInstance() { - return defaultInstance; - } - - public RegionStoreSequenceIds getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private RegionStoreSequenceIds( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - lastFlushedSequenceId_ = input.readUInt64(); - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - storeSequenceId_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - storeSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public RegionStoreSequenceIds parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new RegionStoreSequenceIds(input, extensionRegistry); - } - }; - - 
@java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required uint64 last_flushed_sequence_id = 1; - public static final int LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER = 1; - private long lastFlushedSequenceId_; - /** - * required uint64 last_flushed_sequence_id = 1; - */ - public boolean hasLastFlushedSequenceId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required uint64 last_flushed_sequence_id = 1; - */ - public long getLastFlushedSequenceId() { - return lastFlushedSequenceId_; - } - - // repeated .StoreSequenceId store_sequence_id = 2; - public static final int STORE_SEQUENCE_ID_FIELD_NUMBER = 2; - private java.util.List storeSequenceId_; - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public java.util.List getStoreSequenceIdList() { - return storeSequenceId_; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public java.util.List - getStoreSequenceIdOrBuilderList() { - return storeSequenceId_; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public int getStoreSequenceIdCount() { - return storeSequenceId_.size(); - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index) { - return storeSequenceId_.get(index); - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( - int index) { - return storeSequenceId_.get(index); - } - - private void initFields() { - lastFlushedSequenceId_ = 0L; - storeSequenceId_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasLastFlushedSequenceId()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getStoreSequenceIdCount(); i++) { - if (!getStoreSequenceId(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, lastFlushedSequenceId_); - } - for (int i = 0; i < storeSequenceId_.size(); i++) { - output.writeMessage(2, storeSequenceId_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, lastFlushedSequenceId_); - } - for (int i = 0; i < storeSequenceId_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, storeSequenceId_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds) obj; - - boolean result = true; - result = result && (hasLastFlushedSequenceId() == other.hasLastFlushedSequenceId()); - if (hasLastFlushedSequenceId()) { - result = result && (getLastFlushedSequenceId() - == other.getLastFlushedSequenceId()); - } - result = result && getStoreSequenceIdList() - .equals(other.getStoreSequenceIdList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasLastFlushedSequenceId()) { - hash = (37 * hash) + LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLastFlushedSequenceId()); - } - if (getStoreSequenceIdCount() > 0) { - hash = (37 * hash) + STORE_SEQUENCE_ID_FIELD_NUMBER; - hash = (53 * hash) + getStoreSequenceIdList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code RegionStoreSequenceIds} - * - *
-     **
-     * contains a sequence id of a region which should be the minimum of its store sequence ids and 
-     * list sequence ids of the region's stores
-     * </pre>
- */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIdsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getStoreSequenceIdFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - lastFlushedSequenceId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - if (storeSequenceIdBuilder_ == null) { - storeSequenceId_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - storeSequenceIdBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.lastFlushedSequenceId_ = lastFlushedSequenceId_; - if (storeSequenceIdBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.storeSequenceId_ = storeSequenceId_; - } else { - result.storeSequenceId_ = storeSequenceIdBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder 
mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.getDefaultInstance()) return this; - if (other.hasLastFlushedSequenceId()) { - setLastFlushedSequenceId(other.getLastFlushedSequenceId()); - } - if (storeSequenceIdBuilder_ == null) { - if (!other.storeSequenceId_.isEmpty()) { - if (storeSequenceId_.isEmpty()) { - storeSequenceId_ = other.storeSequenceId_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.addAll(other.storeSequenceId_); - } - onChanged(); - } - } else { - if (!other.storeSequenceId_.isEmpty()) { - if (storeSequenceIdBuilder_.isEmpty()) { - storeSequenceIdBuilder_.dispose(); - storeSequenceIdBuilder_ = null; - storeSequenceId_ = other.storeSequenceId_; - bitField0_ = (bitField0_ & ~0x00000002); - storeSequenceIdBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getStoreSequenceIdFieldBuilder() : null; - } else { - storeSequenceIdBuilder_.addAllMessages(other.storeSequenceId_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasLastFlushedSequenceId()) { - - return false; - } - for (int i = 0; i < getStoreSequenceIdCount(); i++) { - if (!getStoreSequenceId(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required uint64 last_flushed_sequence_id = 1; - private long lastFlushedSequenceId_ ; - /** - * required uint64 last_flushed_sequence_id = 1; - */ - public boolean hasLastFlushedSequenceId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required uint64 last_flushed_sequence_id = 1; - */ - public long getLastFlushedSequenceId() { - return lastFlushedSequenceId_; - } - /** - * required uint64 last_flushed_sequence_id = 1; - */ - public Builder setLastFlushedSequenceId(long value) { - bitField0_ |= 0x00000001; - lastFlushedSequenceId_ = value; - onChanged(); - return this; - } - /** - * required uint64 last_flushed_sequence_id = 1; - */ - public Builder clearLastFlushedSequenceId() { - bitField0_ = (bitField0_ & ~0x00000001); - lastFlushedSequenceId_ = 0L; - onChanged(); - return this; - } - - // repeated .StoreSequenceId store_sequence_id = 2; - private java.util.List storeSequenceId_ = - java.util.Collections.emptyList(); - private void ensureStoreSequenceIdIsMutable() { - if (!((bitField0_ & 0x00000002) == 
0x00000002)) { - storeSequenceId_ = new java.util.ArrayList(storeSequenceId_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder> storeSequenceIdBuilder_; - - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public java.util.List getStoreSequenceIdList() { - if (storeSequenceIdBuilder_ == null) { - return java.util.Collections.unmodifiableList(storeSequenceId_); - } else { - return storeSequenceIdBuilder_.getMessageList(); - } - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public int getStoreSequenceIdCount() { - if (storeSequenceIdBuilder_ == null) { - return storeSequenceId_.size(); - } else { - return storeSequenceIdBuilder_.getCount(); - } - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index) { - if (storeSequenceIdBuilder_ == null) { - return storeSequenceId_.get(index); - } else { - return storeSequenceIdBuilder_.getMessage(index); - } - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder setStoreSequenceId( - int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId value) { - if (storeSequenceIdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.set(index, value); - onChanged(); - } else { - storeSequenceIdBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder setStoreSequenceId( - int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder builderForValue) { - if (storeSequenceIdBuilder_ == null) { - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.set(index, builderForValue.build()); - onChanged(); - } else { - storeSequenceIdBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder addStoreSequenceId(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId value) { - if (storeSequenceIdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.add(value); - onChanged(); - } else { - storeSequenceIdBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder addStoreSequenceId( - int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId value) { - if (storeSequenceIdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.add(index, value); - onChanged(); - } else { - storeSequenceIdBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder addStoreSequenceId( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder builderForValue) { - if (storeSequenceIdBuilder_ == null) { - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.add(builderForValue.build()); - onChanged(); - } else { - 
storeSequenceIdBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder addStoreSequenceId( - int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder builderForValue) { - if (storeSequenceIdBuilder_ == null) { - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.add(index, builderForValue.build()); - onChanged(); - } else { - storeSequenceIdBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder addAllStoreSequenceId( - java.lang.Iterable values) { - if (storeSequenceIdBuilder_ == null) { - ensureStoreSequenceIdIsMutable(); - super.addAll(values, storeSequenceId_); - onChanged(); - } else { - storeSequenceIdBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder clearStoreSequenceId() { - if (storeSequenceIdBuilder_ == null) { - storeSequenceId_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - storeSequenceIdBuilder_.clear(); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder removeStoreSequenceId(int index) { - if (storeSequenceIdBuilder_ == null) { - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.remove(index); - onChanged(); - } else { - storeSequenceIdBuilder_.remove(index); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder getStoreSequenceIdBuilder( - int index) { - return getStoreSequenceIdFieldBuilder().getBuilder(index); - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( - int index) { - if (storeSequenceIdBuilder_ == null) { - return storeSequenceId_.get(index); } else { - return storeSequenceIdBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public java.util.List - getStoreSequenceIdOrBuilderList() { - if (storeSequenceIdBuilder_ != null) { - return storeSequenceIdBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(storeSequenceId_); - } - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder() { - return getStoreSequenceIdFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance()); - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder( - int index) { - return getStoreSequenceIdFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance()); - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public java.util.List - getStoreSequenceIdBuilderList() { - return getStoreSequenceIdFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId, 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder> - getStoreSequenceIdFieldBuilder() { - if (storeSequenceIdBuilder_ == null) { - storeSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder>( - storeSequenceId_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - storeSequenceId_ = null; - } - return storeSequenceIdBuilder_; - } - - // @@protoc_insertion_point(builder_scope:RegionStoreSequenceIds) - } - - static { - defaultInstance = new RegionStoreSequenceIds(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RegionStoreSequenceIds) - } - private static com.google.protobuf.Descriptors.Descriptor internal_static_MetaRegionServer_descriptor; private static @@ -9633,16 +8246,6 @@ public final class ZooKeeperProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_TableLock_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_StoreSequenceId_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_StoreSequenceId_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RegionStoreSequenceIds_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RegionStoreSequenceIds_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -9681,12 +8284,8 @@ public final class ZooKeeperProtos { "2\n.TableName\022\037\n\nlock_owner\030\002 \001(\0132\013.Serve" + "rName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004 " + "\001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(" + - "\003\";\n\017StoreSequenceId\022\023\n\013family_name\030\001 \002(", - "\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026RegionStoreSeq" + - "uenceIds\022 \n\030last_flushed_sequence_id\030\001 \002" + - "(\004\022+\n\021store_sequence_id\030\002 \003(\0132\020.StoreSeq" + - "uenceIdBE\n*org.apache.hadoop.hbase.proto" + - "buf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" + "\003BE\n*org.apache.hadoop.hbase.protobuf.ge", + "neratedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -9753,18 +8352,6 @@ public final class ZooKeeperProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableLock_descriptor, new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", "CreateTime", }); - internal_static_StoreSequenceId_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_StoreSequenceId_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_StoreSequenceId_descriptor, - new java.lang.String[] { "FamilyName", "SequenceId", }); - internal_static_RegionStoreSequenceIds_descriptor = - getDescriptor().getMessageTypes().get(11); - 
internal_static_RegionStoreSequenceIds_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RegionStoreSequenceIds_descriptor, - new java.lang.String[] { "LastFlushedSequenceId", "StoreSequenceId", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto index 7e78395..b025afc 100644 --- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto +++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto @@ -59,6 +59,23 @@ message RegionInTransition { required RegionState region_state = 2; } +/** + * sequence Id of a store + */ +message StoreSequenceId { + required bytes family_name = 1; + required uint64 sequence_id = 2; +} + +/** + * contains a sequence id of a region which should be the minimum of its store sequence ids and + * list sequence ids of the region's stores + */ +message RegionStoreSequenceIds { + required uint64 last_flushed_sequence_id = 1; + repeated StoreSequenceId store_sequence_id = 2; +} + message RegionLoad { /** the region specifier */ required RegionSpecifier region_specifier = 1; @@ -113,6 +130,9 @@ message RegionLoad { /** The current data locality for region in the regionserver */ optional float data_locality = 16; + + /** the most recent sequence Id of each store from cache flush */ + repeated StoreSequenceId store_complete_sequence_id = 17; } /* Server-level protobufs */ diff --git a/hbase-protocol/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol/src/main/protobuf/RegionServerStatus.proto index 75e5ae4..855c2a1 100644 --- a/hbase-protocol/src/main/protobuf/RegionServerStatus.proto +++ b/hbase-protocol/src/main/protobuf/RegionServerStatus.proto @@ -74,8 +74,11 @@ message GetLastFlushedSequenceIdRequest { } message GetLastFlushedSequenceIdResponse { - /* the last WAL sequence id flushed from MemStore to HFile for the region */ + /** the last WAL sequence id flushed from MemStore to HFile for the region */ required uint64 last_flushed_sequence_id = 1; + + /** the last WAL sequence id flushed from MemStore to HFile for stores of the region */ + repeated StoreSequenceId store_last_flushed_sequence_id = 2; } message RegionStateTransition { diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto index bac881b..617e2cf 100644 --- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -152,20 +152,3 @@ message TableLock { optional string purpose = 5; optional int64 create_time = 6; } - -/** - * sequence Id of a store - */ -message StoreSequenceId { - required bytes family_name = 1; - required uint64 sequence_id = 2; -} - -/** - * contains a sequence id of a region which should be the minimum of its store sequence ids and - * list sequence ids of the region's stores - */ -message RegionStoreSequenceIds { - required uint64 last_flushed_sequence_id = 1; - repeated StoreSequenceId store_sequence_id = 2; -}
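(Illustrative aside, not part of the patch.) With StoreSequenceId and RegionStoreSequenceIds now defined in ClusterStatus.proto, callers build and parse them through the regenerated ClusterStatusProtos classes. A minimal sketch, with made-up family names and sequence ids:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionStoreSequenceIdsExample {
  public static void main(String[] args) throws Exception {
    // The region-level id is the minimum of the per-store ids, per the message doc above.
    RegionStoreSequenceIds ids = RegionStoreSequenceIds.newBuilder()
        .setLastFlushedSequenceId(100L)
        .addStoreSequenceId(StoreSequenceId.newBuilder()
            .setFamilyName(ByteString.copyFrom(Bytes.toBytes("cf1")))
            .setSequenceId(100L))
        .addStoreSequenceId(StoreSequenceId.newBuilder()
            .setFamilyName(ByteString.copyFrom(Bytes.toBytes("cf2")))
            .setSequenceId(250L))
        .build();
    // Round-trip through bytes, as happens when the ids are persisted in a ZK node.
    RegionStoreSequenceIds parsed = RegionStoreSequenceIds.parseFrom(ids.toByteArray());
    for (StoreSequenceId store : parsed.getStoreSequenceIdList()) {
      System.out.println(Bytes.toString(store.getFamilyName().toByteArray())
          + " -> " + store.getSequenceId());
    }
  }
}

Keeping last_flushed_sequence_id alongside the repeated field means readers that only need the region-level minimum, such as the split-log manager below, never have to walk the per-store list.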
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java index 707850d..82f00b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java @@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.coordination; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.SplitLogTask; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.SplitLogWorker; import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java index 694ccff..299fa7e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java @@ -33,7 +33,6 @@ import java.util.concurrent.ConcurrentMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CoordinatedStateManager; @@ -43,17 +42,17 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogTask; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination.TaskFinisher.Status; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective; import org.apache.hadoop.hbase.master.SplitLogManager.Task; import org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.DefaultWALProvider; import org.apache.hadoop.hbase.wal.WALSplitter; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; @@ -619,7 +618,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements try { long lastSequenceId = this.details.getMaster().getServerManager() - .getLastFlushedSequenceId(regionEncodeName.getBytes()); + .getLastFlushedSequenceId(regionEncodeName.getBytes()).getLastFlushedSequenceId(); /* * znode layout: .../region_id[last known flushed sequence id]/failed server[last known
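(Illustrative aside, not part of the patch.) The hunk above shows the master consuming only the region-level value when naming the recovering-region znode; the per-store ids are what make family-granular replay decisions possible. One plausible way a consumer might index them; the helper names here are invented for illustration:

import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreSequenceIdLookup {
  /** Index the per-store flushed sequence ids by family name for quick lookup. */
  public static Map<byte[], Long> indexByFamily(RegionStoreSequenceIds ids) {
    Map<byte[], Long> byFamily = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
    for (StoreSequenceId store : ids.getStoreSequenceIdList()) {
      byFamily.put(store.getFamilyName().toByteArray(), store.getSequenceId());
    }
    return byFamily;
  }

  /**
   * An edit can be skipped if its sequence id is at or below the flushed id of
   * its family; families absent from the map fall back to the region-level id.
   */
  public static boolean canSkip(Map<byte[], Long> byFamily, long regionLevelId,
      byte[] family, long editSeqId) {
    Long flushed = byFamily.get(family);
    return editSeqId <= (flushed != null ? flushed.longValue() : regionLevelId);
  }
}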
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java index 9ea6bd7..c56d0ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java @@ -30,7 +30,6 @@ import org.apache.commons.lang.math.RandomUtils; import org.apache.commons.lang.mutable.MutableInt; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -38,17 +37,18 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogTask; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.SplitLogWorker; import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor; import org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler; -import org.apache.hadoop.hbase.wal.DefaultWALProvider; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.wal.DefaultWALProvider; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 6930bf3..af914db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -47,14 +47,15 @@ import org.apache.hadoop.hbase.procedure.MasterProcedureManager; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest; @@ -270,8 +271,8 @@ public class MasterRpcServices extends RSRpcServices throw new 
ServiceException(ioe); } byte[] encodedRegionName = request.getRegionName().toByteArray(); - long seqId = master.serverManager.getLastFlushedSequenceId(encodedRegionName); - return ResponseConverter.buildGetLastFlushedSequenceIdResponse(seqId); + RegionStoreSequenceIds ids = master.serverManager.getLastFlushedSequenceId(encodedRegionName); + return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 796cc8a..9ae71c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -29,8 +29,8 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; -import java.util.SortedMap; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.CopyOnWriteArrayList; @@ -38,15 +38,18 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClockOutOfSyncException; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.YouAreDeadException; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler; import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler; @@ -58,6 +61,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; @@ -68,6 +73,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.ByteString; import com.google.protobuf.ServiceException; /** @@ -111,9 +117,13 @@ public class ServerManager { // Set if we are to shutdown the cluster. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 796cc8a..9ae71c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -29,8 +29,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import java.util.SortedMap;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 
@@ -38,15 +38,18 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClockOutOfSyncException;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.YouAreDeadException;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler;
 import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
@@ -58,6 +61,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
@@ -68,6 +73,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ByteString;
 import com.google.protobuf.ServiceException;
 
 /**
@@ -111,9 +117,13 @@ public class ServerManager {
   // Set if we are to shutdown the cluster.
   private volatile boolean clusterShutdown = false;
 
-  private final SortedMap<byte[], Long> flushedSequenceIdByRegion =
+  private final ConcurrentNavigableMap<byte[], Long> flushedSequenceIdByRegion =
     new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+  private final ConcurrentNavigableMap<byte[], ConcurrentNavigableMap<byte[], Long>>
+    storeFlushedSequenceIdByRegion =
+      new ConcurrentSkipListMap<byte[], ConcurrentNavigableMap<byte[], Long>>(Bytes.BYTES_COMPARATOR);
+
   /** Map of registered servers to their current load */
   private final ConcurrentHashMap<ServerName, ServerLoad> onlineServers =
     new ConcurrentHashMap<ServerName, ServerLoad>();
 
@@ -246,6 +256,18 @@ public class ServerManager {
     }
     return sn;
   }
+  private ConcurrentNavigableMap<byte[], Long> getOrCreateStoreFlushedSequenceId(
+    byte[] regionName) {
+    ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
+        storeFlushedSequenceIdByRegion.get(regionName);
+    if (storeFlushedSequenceId != null) {
+      return storeFlushedSequenceId;
+    }
+    storeFlushedSequenceId = new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+    ConcurrentNavigableMap<byte[], Long> alreadyPut =
+        storeFlushedSequenceIdByRegion.putIfAbsent(regionName, storeFlushedSequenceId);
+    return alreadyPut == null ? storeFlushedSequenceId : alreadyPut;
+  }
   /**
    * Updates last flushed sequence Ids for the regions on server sn
    * @param sn
@@ -257,18 +279,25 @@ public class ServerManager {
       byte[] encodedRegionName = Bytes.toBytes(HRegionInfo.encodeRegionName(entry.getKey()));
       Long existingValue = flushedSequenceIdByRegion.get(encodedRegionName);
       long l = entry.getValue().getCompleteSequenceId();
-      if (existingValue != null) {
-        if (l != -1 && l < existingValue) {
-          LOG.warn("RegionServer " + sn +
-              " indicates a last flushed sequence id (" + entry.getValue() +
-              ") that is less than the previous last flushed sequence id (" +
-              existingValue + ") for region " +
-              Bytes.toString(entry.getKey()) + " Ignoring.");
-
-          continue; // Don't let smaller sequence ids override greater sequence ids.
+      // Don't let smaller sequence ids override greater sequence ids.
+      if (existingValue == null || (l != HConstants.NO_SEQNUM && l > existingValue)) {
+        flushedSequenceIdByRegion.put(encodedRegionName, l);
+      } else if (l != HConstants.NO_SEQNUM && l < existingValue) {
+        LOG.warn("RegionServer " + sn + " indicates a last flushed sequence id ("
+            + l + ") that is less than the previous last flushed sequence id ("
+            + existingValue + ") for region " + Bytes.toString(entry.getKey()) + " Ignoring.");
+      }
+      ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
+        getOrCreateStoreFlushedSequenceId(encodedRegionName);
+      for (StoreSequenceId storeSeqId : entry.getValue().getStoreCompleteSequenceId()) {
+        byte[] family = storeSeqId.getFamilyName().toByteArray();
+        existingValue = storeFlushedSequenceId.get(family);
+        l = storeSeqId.getSequenceId();
+        // Don't let smaller sequence ids override greater sequence ids.
+        if (existingValue == null || (l != HConstants.NO_SEQNUM && l > existingValue.longValue())) {
+          storeFlushedSequenceId.put(family, l);
         }
       }
-      flushedSequenceIdByRegion.put(encodedRegionName, l);
     }
   }
 
@@ -407,12 +436,20 @@ public class ServerManager {
     this.rsAdmins.remove(serverName);
   }
 
-  public long getLastFlushedSequenceId(byte[] encodedRegionName) {
-    long seqId = -1L;
-    if (flushedSequenceIdByRegion.containsKey(encodedRegionName)) {
-      seqId = flushedSequenceIdByRegion.get(encodedRegionName);
+  public RegionStoreSequenceIds getLastFlushedSequenceId(byte[] encodedRegionName) {
+    RegionStoreSequenceIds.Builder builder = RegionStoreSequenceIds.newBuilder();
+    Long seqId = flushedSequenceIdByRegion.get(encodedRegionName);
+    builder.setLastFlushedSequenceId(seqId != null ?
+      seqId.longValue() : HConstants.NO_SEQNUM);
+    Map<byte[], Long> storeFlushedSequenceId =
+        storeFlushedSequenceIdByRegion.get(encodedRegionName);
+    if (storeFlushedSequenceId != null) {
+      for (Map.Entry<byte[], Long> entry : storeFlushedSequenceId.entrySet()) {
+        builder.addStoreSequenceId(StoreSequenceId.newBuilder()
+            .setFamilyName(ByteString.copyFrom(entry.getKey()))
+            .setSequenceId(entry.getValue().longValue()).build());
+      }
     }
-    return seqId;
+    return builder.build();
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6cf2ce3..7a15857 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -127,6 +127,8 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
@@ -168,6 +170,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.io.Closeables;
+import com.google.protobuf.ByteString;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcCallback;
@@ -235,9 +238,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
    * The max sequence id of flushed data on this region. Used doing some rough calculations on
    * whether time to flush or not.
    */
-  protected volatile long maxFlushedSeqId = -1L;
+  private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM;
 
   /**
+   * Record the sequence id of last flush operation.
+   */
+  private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM;
+  /**
    * Region scoped edit sequence Id. Edits to this region are GUARANTEED to appear in the WAL
    * file in this sequence id's order; i.e. edit #2 will be in the WAL after edit #1.
    * Its default value is -1L. This default is used as a marker to indicate
@@ -1496,6 +1503,23 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
     return Collections.min(lastStoreFlushTimeMap.values());
   }
 
+  RegionLoad.Builder setCompleteSequenceId(RegionLoad.Builder regionLoadBldr) {
+    long lastFlushOpSeqIdLocal = this.lastFlushOpSeqId;
+    byte[] encodedRegionName = this.getRegionInfo().getEncodedNameAsBytes();
+    regionLoadBldr.clearStoreCompleteSequenceId();
+    for (byte[] familyName : this.stores.keySet()) {
+      long oldestUnflushedSeqId = this.wal.getEarliestMemstoreSeqNum(encodedRegionName, familyName);
+      // No oldestUnflushedSeqId means no data has been written to the store after the last flush,
+      // so we use lastFlushOpSeqId as the complete sequence id for the store.
+      regionLoadBldr.addStoreCompleteSequenceId(StoreSequenceId
+          .newBuilder()
+          .setFamilyName(ByteString.copyFrom(familyName))
+          .setSequenceId(
+            oldestUnflushedSeqId < 0 ? lastFlushOpSeqIdLocal : oldestUnflushedSeqId - 1).build());
+    }
+    return regionLoadBldr.setCompleteSequenceId(this.maxFlushedSeqId);
+  }
+
   //////////////////////////////////////////////////////////////////////////////
   // HRegion maintenance.
   //
@@ -1893,12 +1917,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
       // wal can be null replaying edits.
       if (wal != null) {
         w = mvcc.beginMemstoreInsert();
-        long flushSeqId = getNextSequenceId(wal);
+        long flushOpSeqId = getNextSequenceId(wal);
         FlushResult flushResult = new FlushResult(
-          FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, flushSeqId, "Nothing to flush");
-        w.setWriteNumber(flushSeqId);
+          FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, flushOpSeqId, "Nothing to flush");
+        w.setWriteNumber(flushOpSeqId);
         mvcc.waitForPreviousTransactionsComplete(w);
         w = null;
+        this.maxFlushedSeqId = this.lastFlushOpSeqId = flushOpSeqId;
         return flushResult;
       } else {
         return new FlushResult(FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY,
@@ -2132,6 +2157,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
 
     // Update the oldest unflushed sequence id for region.
     this.maxFlushedSeqId = flushedSeqId;
+    // Record flush operation sequence id.
+    this.lastFlushOpSeqId = flushOpSeqId;
+
     // C. Finally notify anyone waiting on memstore to clear:
     // e.g. checkResources().
     synchronized (this) {
@@ -6107,7 +6135,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
       ClassSize.OBJECT +
       ClassSize.ARRAY +
       44 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT +
-      (11 * Bytes.SIZEOF_LONG) +
+      (12 * Bytes.SIZEOF_LONG) +
       4 * Bytes.SIZEOF_BOOLEAN);
 
   // woefully out of date - currently missing:
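
The arithmetic in setCompleteSequenceId above is the crux of the region-side reporting: a store's completed sequence id is one less than its oldest unflushed edit, and when the store has no pending edits at all (the WAL reports a negative sentinel) it falls back to the id of the last flush operation. A small worked example of the same rule, with made-up numbers:

    public class StoreCompleteSeqIdExample {
      public static void main(String[] args) {
        long lastFlushOpSeqId = 110L; // hypothetical id of the most recent flush
        // cf1 has an unflushed edit at 120, so everything up to 119 is complete.
        long cf1OldestUnflushed = 120L;
        // cf2 has nothing unflushed; the WAL reports a negative sentinel.
        long cf2OldestUnflushed = -1L;

        long cf1Complete = cf1OldestUnflushed < 0 ? lastFlushOpSeqId : cf1OldestUnflushed - 1;
        long cf2Complete = cf2OldestUnflushed < 0 ? lastFlushOpSeqId : cf2OldestUnflushed - 1;
        System.out.println("cf1 complete up to " + cf1Complete); // 119
        System.out.println("cf2 complete up to " + cf2Complete); // 110
      }
    }

The fallback matters because with per-column-family flush a quiet store may go many flush cycles without new data, yet its completed id must still advance past edits it has already persisted.
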
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 4669f8f..f5e80ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -108,12 +108,14 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServic
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
@@ -1378,7 +1380,8 @@ public class HRegionServer extends HasThread implements
     }
     regionSpecifier.setType(RegionSpecifierType.REGION_NAME);
     regionSpecifier.setValue(ByteStringer.wrap(name));
-    regionLoadBldr.setRegionSpecifier(regionSpecifier.build())
+    r.setCompleteSequenceId(regionLoadBldr)
+      .setRegionSpecifier(regionSpecifier.build())
       .setStores(stores)
       .setStorefiles(storefiles)
       .setStoreUncompressedSizeMB(storeUncompressedSizeMB)
@@ -1392,7 +1395,6 @@ public class HRegionServer extends HasThread implements
       .setWriteRequestsCount(r.writeRequestsCount.get())
       .setTotalCompactingKVs(totalCompactingKVs)
       .setCurrentCompactedKVs(currentCompactedKVs)
-      .setCompleteSequenceId(r.maxFlushedSeqId)
       .setDataLocality(dataLocality);
 
     return regionLoadBldr.build();
@@ -2161,30 +2163,30 @@ public class HRegionServer extends HasThread implements
   }
 
   @Override
-  public long getLastSequenceId(byte[] encodedRegionName) {
-    long lastFlushedSequenceId = -1L;
+  public RegionStoreSequenceIds getLastSequenceId(byte[] encodedRegionName) {
     try {
-      GetLastFlushedSequenceIdRequest req = RequestConverter
-          .buildGetLastFlushedSequenceIdRequest(encodedRegionName);
+      GetLastFlushedSequenceIdRequest req =
+          RequestConverter.buildGetLastFlushedSequenceIdRequest(encodedRegionName);
       RegionServerStatusService.BlockingInterface rss = rssStub;
       if (rss == null) { // Try to connect one more time
         createRegionServerStatusStub();
         rss = rssStub;
         if (rss == null) {
           // Still no luck, we tried
-          LOG.warn("Unable to connect to the master to check "
-              + "the last flushed sequence id");
-          return -1L;
+          LOG.warn("Unable to connect to the master to check " + "the last flushed sequence id");
+          return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM)
+              .build();
         }
       }
-      lastFlushedSequenceId = rss.getLastFlushedSequenceId(null, req)
-          .getLastFlushedSequenceId();
+      GetLastFlushedSequenceIdResponse resp = rss.getLastFlushedSequenceId(null, req);
+      return RegionStoreSequenceIds.newBuilder()
+          .setLastFlushedSequenceId(resp.getLastFlushedSequenceId())
+          .addAllStoreSequenceId(resp.getStoreLastFlushedSequenceIdList()).build();
     } catch (ServiceException e) {
-      lastFlushedSequenceId = -1l;
-      LOG.warn("Unable to connect to the master to check "
-          + "the last flushed sequence id", e);
+      LOG.warn("Unable to connect to the master to check the last flushed sequence id", e);
+      return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM)
+          .build();
     }
-    return lastFlushedSequenceId;
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java
index 98f0985..81db9fa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java
@@ -19,15 +19,18 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
 
 /**
  * Last flushed sequence Ids for the regions on region server
 */
 @InterfaceAudience.Private
 public interface LastSequenceId {
+
   /**
    * @param encodedRegionName Encoded region name
-   * @return Last flushed sequence Id for region or -1 if it can't be determined
+   * @return Last flushed sequence Id for the region and its stores; HConstants.NO_SEQNUM if unknown
    */
-  long getLastSequenceId(byte[] encodedRegionName);
+  RegionStoreSequenceIds getLastSequenceId(byte[] encodedRegionName);
+
 }
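
With the interface widened, failure is signalled in-band: callers always receive a message, with the region-level id set to HConstants.NO_SEQNUM when the master could not be reached, rather than a magic -1. A minimal consumer sketch (the checker parameter stands in for any LastSequenceId implementation, such as the region server above; the method name dump is invented):

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
    import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
    import org.apache.hadoop.hbase.regionserver.LastSequenceId;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LastSequenceIdCaller {
      // checker stands in for any LastSequenceId implementation.
      static void dump(LastSequenceId checker, byte[] encodedRegionName) {
        RegionStoreSequenceIds ids = checker.getLastSequenceId(encodedRegionName);
        if (ids.getLastFlushedSequenceId() == HConstants.NO_SEQNUM) {
          // RPC failure and "never flushed" now look the same to callers.
          System.out.println("region flush state unknown");
          return;
        }
        System.out.println("region flushed up to " + ids.getLastFlushedSequenceId());
        for (StoreSequenceId store : ids.getStoreSequenceIdList()) {
          System.out.println(Bytes.toString(store.getFamilyName().toByteArray())
              + " flushed up to " + store.getSequenceId());
        }
      }
    }
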
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index d2ba69d..248b71a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -1684,7 +1684,7 @@ public class FSHLog implements WAL {
     oldestUnflushedStoreSequenceIdsOfRegion =
         new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
     ConcurrentMap<byte[], Long> alreadyPut =
-        oldestUnflushedStoreSequenceIds.put(encodedRegionName,
+        oldestUnflushedStoreSequenceIds.putIfAbsent(encodedRegionName,
           oldestUnflushedStoreSequenceIdsOfRegion);
     return alreadyPut == null ? oldestUnflushedStoreSequenceIdsOfRegion : alreadyPut;
   }
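
The one-line FSHLog change (put becomes putIfAbsent) is a real concurrency fix: with plain put, two racing threads could each install their own per-region map, and updates made through the losing map would be invisible to later readers. The corrected get-then-putIfAbsent idiom, reduced to a self-contained sketch (class and field names here are illustrative, not from the patch):

    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.ConcurrentSkipListMap;

    import org.apache.hadoop.hbase.util.Bytes;

    public class GetOrCreateIdiom {
      private final ConcurrentMap<byte[], ConcurrentMap<byte[], Long>> byRegion =
          new ConcurrentSkipListMap<byte[], ConcurrentMap<byte[], Long>>(Bytes.BYTES_COMPARATOR);

      ConcurrentMap<byte[], Long> getOrCreate(byte[] regionName) {
        ConcurrentMap<byte[], Long> map = byRegion.get(regionName);
        if (map != null) {
          return map; // fast path, no allocation
        }
        map = new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
        // putIfAbsent returns the previously installed map if another thread won the race.
        ConcurrentMap<byte[], Long> raced = byRegion.putIfAbsent(regionName, map);
        return raced == null ? map : raced;
      }
    }

Note that ServerManager.getOrCreateStoreFlushedSequenceId earlier in this patch already uses exactly this shape.
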
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
index 147a13d..1ea9d4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
@@ -56,6 +56,7 @@ class FSWALEntry extends Entry {
   private final transient HTableDescriptor htd;
   private final transient HRegionInfo hri;
   private final transient List<Cell> memstoreCells;
+  private final Set<byte[]> familyNames;
 
   FSWALEntry(final long sequence, final WALKey key, final WALEdit edit,
       final AtomicLong referenceToRegionSequenceId, final boolean inMemstore,
@@ -67,6 +68,23 @@ class FSWALEntry extends Entry {
     this.hri = hri;
     this.sequence = sequence;
     this.memstoreCells = memstoreCells;
+    if (inMemstore) {
+      // Construct familyNames here to reduce the work of the log sinker.
+      ArrayList<Cell> cells = this.getEdit().getCells();
+      if (CollectionUtils.isEmpty(cells)) {
+        this.familyNames = Collections.<byte[]> emptySet();
+      } else {
+        Set<byte[]> familySet = Sets.newTreeSet(Bytes.BYTES_COMPARATOR);
+        for (Cell cell : cells) {
+          if (!CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
+            familySet.add(CellUtil.cloneFamily(cell));
+          }
+        }
+        this.familyNames = Collections.unmodifiableSet(familySet);
+      }
+    } else {
+      this.familyNames = Collections.<byte[]> emptySet();
+    }
   }
 
   public String toString() {
@@ -118,16 +136,6 @@ class FSWALEntry extends Entry {
    * @return the family names which are effected by this edit.
    */
   Set<byte[]> getFamilyNames() {
-    ArrayList<Cell> cells = this.getEdit().getCells();
-    if (CollectionUtils.isEmpty(cells)) {
-      return Collections.emptySet();
-    }
-    Set<byte[]> familySet = Sets.newTreeSet(Bytes.BYTES_COMPARATOR);
-    for (Cell cell : cells) {
-      if (!CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-        familySet.add(CellUtil.cloneFamily(cell));
-      }
-    }
-    return familySet;
+    return familyNames;
  }
 }
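
Precomputing familyNames in the constructor moves the per-cell scan onto the many producer threads and off the single WAL consumer that previously recomputed it on every call. The TreeSet ordered by Bytes.BYTES_COMPARATOR matters because byte[] compares by identity, not content; a tiny stand-alone demonstration of why a plain HashSet would fail to deduplicate families:

    import java.util.HashSet;
    import java.util.Set;
    import java.util.TreeSet;

    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilySetDemo {
      public static void main(String[] args) {
        byte[] a = Bytes.toBytes("cf");
        byte[] b = Bytes.toBytes("cf");
        Set<byte[]> hash = new HashSet<byte[]>();
        hash.add(a);
        hash.add(b);
        Set<byte[]> tree = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
        tree.add(a);
        tree.add(b);
        System.out.println(hash.size()); // 2: arrays compare by identity
        System.out.println(tree.size()); // 1: comparator sees equal contents
      }
    }
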
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 2ddc9d1..fcae5f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -49,13 +49,8 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.protobuf.ServiceException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
@@ -73,9 +68,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.TagRewriteCell;
-import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
@@ -83,9 +76,9 @@ import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.master.SplitLogManager;
@@ -98,16 +91,19 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoReque
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.LastSequenceId;
-import org.apache.hadoop.hbase.wal.WAL.Entry;
-import org.apache.hadoop.hbase.wal.WAL.Reader;
-import org.apache.hadoop.hbase.wal.WALProvider.Writer;
+// imports for things that haven't moved from regionserver.wal yet.
+import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.regionserver.wal.WALEditsReplaySink;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -115,17 +111,17 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WAL.Reader;
+import org.apache.hadoop.hbase.wal.WALProvider.Writer;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.RemoteException;
 
-// imports for things that haven't moved from regionserver.wal yet.
-import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.regionserver.wal.WALEditsReplaySink;
-import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
-import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.protobuf.ServiceException;
 
 /**
  * This class is responsible for splitting up a bunch of regionserver commit log
@@ -340,7 +336,14 @@ public class WALSplitter {
           lastFlushedSequenceId = ids.getLastFlushedSequenceId();
         }
       } else if (sequenceIdChecker != null) {
-        lastFlushedSequenceId = sequenceIdChecker.getLastSequenceId(region);
+        RegionStoreSequenceIds ids = sequenceIdChecker.getLastSequenceId(region);
+        Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+        for (StoreSequenceId storeSeqId : ids.getStoreSequenceIdList()) {
+          maxSeqIdInStores.put(storeSeqId.getFamilyName().toByteArray(),
+            storeSeqId.getSequenceId());
+        }
+        regionMaxSeqIdInStores.put(key, maxSeqIdInStores);
+        lastFlushedSequenceId = ids.getLastFlushedSequenceId();
       }
       if (lastFlushedSequenceId == null) {
         lastFlushedSequenceId = -1L;
@@ -1486,6 +1489,29 @@ public class WALSplitter {
       return (new WriterAndPath(regionedits, w));
     }
 
+    private void filterCellByStore(Entry logEntry) {
+      Map<byte[], Long> maxSeqIdInStores =
+          regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
+      if (maxSeqIdInStores == null || maxSeqIdInStores.isEmpty()) {
+        return;
+      }
+      List<Cell> skippedCells = new ArrayList<Cell>();
+      for (Cell cell : logEntry.getEdit().getCells()) {
+        if (!CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
+          byte[] family = CellUtil.cloneFamily(cell);
+          Long maxSeqId = maxSeqIdInStores.get(family);
+          // Do not skip cell even if maxSeqId is null. Maybe we are in a rolling upgrade,
+          // or the master crashed earlier and we cannot get the information.
+          if (maxSeqId != null && maxSeqId.longValue() >= logEntry.getKey().getLogSeqNum()) {
+            skippedCells.add(cell);
+          }
+        }
+      }
+      if (!skippedCells.isEmpty()) {
+        logEntry.getEdit().getCells().removeAll(skippedCells);
+      }
+    }
+
     @Override
     public void append(RegionEntryBuffer buffer) throws IOException {
       List<Entry> entries = buffer.entryBuffer;
@@ -1510,7 +1536,10 @@ public class WALSplitter {
             return;
           }
         }
-        wap.w.append(logEntry);
+        filterCellByStore(logEntry);
+        if (!logEntry.getEdit().isEmpty()) {
+          wap.w.append(logEntry);
+        }
         this.updateRegionMaximumEditLogSeqNum(logEntry);
         editsCount++;
       }
@@ -1702,8 +1731,8 @@ public class WALSplitter {
       HConnection hconn = this.getConnectionByTableName(table);
 
       for (Cell cell : cells) {
-        byte[] row = cell.getRow();
-        byte[] family = cell.getFamily();
+        byte[] row = CellUtil.cloneRow(cell);
+        byte[] family = CellUtil.cloneFamily(cell);
         boolean isCompactionEntry = false;
         if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
           CompactionDescriptor compaction = WALEdit.getCompaction(cell);
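
filterCellByStore's skip rule, in isolation: a replayed cell may be dropped exactly when its store's recorded max flushed sequence id is at or beyond the WAL entry's sequence number; a store with no recorded id keeps its cells for safety. A self-contained sketch of the same decision with hypothetical numbers:

    public class FilterCellByStoreRule {
      // Returns true when a replayed cell may be skipped, mirroring the patch's test.
      static boolean skippable(Long maxSeqIdForStore, long entryLogSeqNum) {
        return maxSeqIdForStore != null && maxSeqIdForStore.longValue() >= entryLogSeqNum;
      }

      public static void main(String[] args) {
        long entrySeq = 115L; // hypothetical WAL entry sequence number
        System.out.println(skippable(119L, entrySeq)); // true: store already flushed past 115
        System.out.println(skippable(110L, entrySeq)); // false: store still needs this edit
        System.out.println(skippable(null, entrySeq)); // false: unknown store, keep the edit
      }
    }

This per-store filtering is the payoff of the whole patch: with per-column-family flush, a single region-level id would either replay too much or, worse, skip edits a lagging store still needs.
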
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
index 78b3eed..a85e684 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
@@ -26,12 +26,12 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
 import org.apache.zookeeper.KeeperException;
 
 /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java
index 0f7c281..e4be517 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -86,14 +87,20 @@ public class TestGetLastFlushedSequenceId {
     }
     assertNotNull(region);
     Thread.sleep(2000);
-    assertEquals(
-      HConstants.NO_SEQNUM,
-      testUtil.getHBaseCluster().getMaster()
-          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes()));
+    RegionStoreSequenceIds ids =
+        testUtil.getHBaseCluster().getMaster()
+            .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
+    assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId());
+    long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId();
+    assertTrue(storeSequenceId > 0);
     testUtil.getHBaseAdmin().flush(tableName);
     Thread.sleep(2000);
-    assertTrue(testUtil.getHBaseCluster().getMaster()
-        .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes()) > 0);
+    ids =
+        testUtil.getHBaseCluster().getMaster()
+            .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
+    assertTrue(ids.getLastFlushedSequenceId() + " > " + storeSequenceId,
+      ids.getLastFlushedSequenceId() > storeSequenceId);
+    assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId());
     table.close();
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
index 3161e8f..e43ac25 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
@@ -48,14 +48,6 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
-import org.apache.hadoop.hbase.regionserver.DefaultMemStore;
-import org.apache.hadoop.hbase.regionserver.FlushAllStoresPolicy;
-import org.apache.hadoop.hbase.regionserver.FlushLargeStoresPolicy;
-import org.apache.hadoop.hbase.regionserver.FlushPolicy;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -134,10 +126,8 @@ public class TestPerColumnFamilyFlush {
     // Set up the configuration
     Configuration conf = HBaseConfiguration.create();
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 200 * 1024);
-    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
-      FlushLargeStoresPolicy.class.getName());
-    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND,
-      100 * 1024);
+    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName());
+    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND, 100 * 1024);
     // Intialize the HRegion
     initHRegion("testSelectiveFlushWhenEnabled", conf);
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
@@ -337,13 +327,11 @@ public class TestPerColumnFamilyFlush {
     return null;
   }
 
-  @Test
-  public void testLogReplay() throws Exception {
+  private void testLogReplayInternal() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 20000);
     // Carefully chosen limits so that the memstore just flushes when we're done
-    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
-      FlushLargeStoresPolicy.class.getName());
+    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName());
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND, 10000);
     final int numRegionServers = 4;
     TEST_UTIL.startMiniCluster(numRegionServers);
@@ -389,8 +377,8 @@ public class TestPerColumnFamilyFlush {
       // CF1 Should have been flushed
       assertEquals(DefaultMemStore.DEEP_OVERHEAD, cf1MemstoreSize);
       // CF2 and CF3 shouldn't have been flushed.
-      assertTrue(cf2MemstoreSize > 0);
-      assertTrue(cf3MemstoreSize > 0);
+      assertTrue(cf2MemstoreSize > DefaultMemStore.DEEP_OVERHEAD);
+      assertTrue(cf3MemstoreSize > DefaultMemStore.DEEP_OVERHEAD);
       assertEquals(totalMemstoreSize + 2 * DefaultMemStore.DEEP_OVERHEAD,
         cf2MemstoreSize + cf3MemstoreSize);
 
@@ -416,10 +404,16 @@ public class TestPerColumnFamilyFlush {
         verifyEdit(3, i, table);
       }
     }
-    TEST_UTIL.shutdownMiniCluster();
   }
 
+  @Test
+  public void testLogReplay() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false);
+    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 1000);
+    testLogReplayInternal();
+  }
+
   // Test Log Replay with Distributed Replay on.
   // In distributed log replay, the log splitters ask the master for the
   // last flushed sequence id for a region. This test would ensure that we
@@ -427,7 +421,7 @@ public class TestPerColumnFamilyFlush {
   @Test
   public void testLogReplayWithDistributedReplay() throws Exception {
     TEST_UTIL.getConfiguration().setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
-    testLogReplay();
+    testLogReplayInternal();
   }
 
   /**
@@ -440,8 +434,7 @@ public class TestPerColumnFamilyFlush {
   public void testFlushingWhenLogRolling() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 300000);
-    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
-      FlushLargeStoresPolicy.class.getName());
+    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName());
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND, 100000);
 
     // Also, let us try real hard to get a log roll to happen.
@@ -530,8 +523,7 @@ public class TestPerColumnFamilyFlush {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
     conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllStoresPolicy.class.getName());
-    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND,
-      400 * 1024);
+    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND, 400 * 1024);
     conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
       ConstantSizeRegionSplitPolicy.class.getName());
@@ -561,8 +553,7 @@ public class TestPerColumnFamilyFlush {
     TEST_UTIL.shutdownMiniCluster();
 
     LOG.info("==============Test with selective flush enabled===============");
-    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
-      FlushLargeStoresPolicy.class.getName());
+    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName());
     TEST_UTIL.startMiniCluster(1);
     TEST_UTIL.getHBaseAdmin().createNamespace(
       NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build());
-- 
1.9.1