diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 6f5051b..6ca10c0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -22,9 +22,14 @@ package org.apache.hadoop.hbase;
 
+import java.util.List;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Strings;
 
@@ -179,6 +184,25 @@ public class ServerLoad {
   }
 
   /**
+   * Called directly from clients such as the hbase shell.
+   * @return the list of ReplicationLoadSource
+   */
+  public List<ReplicationLoadSource> getReplicationLoadSourceList() {
+    return ProtobufUtil.toReplicationLoadSourceList(serverLoad.getReplLoadSourceList());
+  }
+
+  /**
+   * Called directly from clients such as the hbase shell.
+   * @return the ReplicationLoadSink, or null if the server has not reported one
+   */
+  public ReplicationLoadSink getReplicationLoadSink() {
+    if (serverLoad.hasReplLoadSink()) {
+      return ProtobufUtil.toReplicationLoadSink(serverLoad.getReplLoadSink());
+    }
+    return null;
+  }
+
+  /**
    * Originally, this method factored in the effect of requests going to the
    * server as well. However, this does not interact very well with the current
    * region rebalancing code, which only factors number of regions. For the
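For reference, the following is a minimal, illustrative sketch (not part of the patch) of how a client could consume the new ServerLoad accessors once this change is applied. The accessors and the ReplicationLoadSource/ReplicationLoadSink getters are the ones added in this patch; the ReplicationMetricsExample class name, the cluster setup, and the output format are assumptions.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.ClusterStatus;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.ServerLoad;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.HBaseAdmin;
  import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
  import org.apache.hadoop.hbase.replication.ReplicationLoadSource;

  public class ReplicationMetricsExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      HBaseAdmin admin = new HBaseAdmin(conf);
      try {
        ClusterStatus status = admin.getClusterStatus();
        for (ServerName server : status.getServers()) {
          ServerLoad load = status.getLoad(server);
          // One ReplicationLoadSource per replication peer this server ships edits to.
          for (ReplicationLoadSource rls : load.getReplicationLoadSourceList()) {
            System.out.println(server + " source: peer=" + rls.getPeerID()
                + ", sizeOfLogQueue=" + rls.getSizeOfLogQueue()
                + ", replicationLag=" + rls.getReplicationLag());
          }
          // Sink-side metrics; null when the server reported none.
          ReplicationLoadSink sink = load.getReplicationLoadSink();
          if (sink != null) {
            System.out.println(server + " sink: ageOfLastAppliedOp="
                + sink.getAgeOfLastAppliedOp()
                + ", timeStampsOfLastAppliedOp=" + sink.getTimeStampsOfLastAppliedOp());
          }
        }
      } finally {
        admin.close();
      }
    }
  }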
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 2a05f97..947ce9a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -96,6 +96,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
 import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
 import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
@@ -115,6 +116,8 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
+import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.TablePermission;
 import org.apache.hadoop.hbase.security.access.UserPermission;
@@ -2794,4 +2797,24 @@ public final class ProtobufUtil {
     }
     return result;
   }
+
+  public static ReplicationLoadSink toReplicationLoadSink(
+      ClusterStatusProtos.ReplicationLoadSink cls) {
+    return new ReplicationLoadSink(cls.getAgeOfLastAppliedOp(), cls.getTimeStampsOfLastAppliedOp());
+  }
+
+  public static ReplicationLoadSource toReplicationLoadSource(
+      ClusterStatusProtos.ReplicationLoadSource cls) {
+    return new ReplicationLoadSource(cls.getPeerID(), cls.getAgeOfLastShippedOp(),
+        cls.getSizeOfLogQueue(), cls.getTimeStampOfLastShippedOp(), cls.getReplicationLag());
+  }
+
+  public static List<ReplicationLoadSource> toReplicationLoadSourceList(
+      List<ClusterStatusProtos.ReplicationLoadSource> clsList) {
+    List<ReplicationLoadSource> rlsList = new ArrayList<ReplicationLoadSource>(clsList.size());
+    for (ClusterStatusProtos.ReplicationLoadSource cls : clsList) {
+      rlsList.add(toReplicationLoadSource(cls));
+    }
+    return rlsList;
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java
new file mode 100644
index 0000000..9b32687
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A HBase ReplicationLoad to present MetricsSink information + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ReplicationLoadSink { + private long ageOfLastAppliedOp; + private long timeStampsOfLastAppliedOp; + + public ReplicationLoadSink(long age, long timeStamp) { + this.ageOfLastAppliedOp = age; + this.timeStampsOfLastAppliedOp = timeStamp; + } + + public long getAgeOfLastAppliedOp() { + return this.ageOfLastAppliedOp; + } + + public long getTimeStampsOfLastAppliedOp() { + return this.timeStampsOfLastAppliedOp; + } + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java new file mode 100644 index 0000000..62086ad --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +/** + * A HBase ReplicationLoad to present MetricsSource information + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ReplicationLoadSource { + private String peerID; + private long ageOfLastShippedOp; + private int sizeOfLogQueue; + private long timeStampOfLastShippedOp; + private long replicationLag; + + public ReplicationLoadSource(String id, long age, int size, long timeStamp, long lag) { + this.peerID = id; + this.ageOfLastShippedOp = age; + this.sizeOfLogQueue = size; + this.timeStampOfLastShippedOp = timeStamp; + this.replicationLag = lag; + } + + public String getPeerID() { + return this.peerID; + } + + public long getAgeOfLastShippedOp() { + return this.ageOfLastShippedOp; + } + + public long getSizeOfLogQueue() { + return this.sizeOfLogQueue; + } + + public long getTimeStampOfLastShippedOp() { + return this.timeStampOfLastShippedOp; + } + + public long getReplicationLag() { + return this.replicationLag; + } +} diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java index c558485..287cccb 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java @@ -4226,273 +4226,52 @@ public final class ClusterStatusProtos { // @@protoc_insertion_point(class_scope:RegionLoad) } - public interface ServerLoadOrBuilder + public interface ReplicationLoadSinkOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional uint32 number_of_requests = 1; - /** - * optional uint32 number_of_requests = 1; - * - *
-     ** Number of requests since last report. 
-     * 
- */ - boolean hasNumberOfRequests(); - /** - * optional uint32 number_of_requests = 1; - * - *
-     ** Number of requests since last report. 
-     * 
- */ - int getNumberOfRequests(); - - // optional uint32 total_number_of_requests = 2; - /** - * optional uint32 total_number_of_requests = 2; - * - *
-     ** Total Number of requests from the start of the region server. 
-     * 
- */ - boolean hasTotalNumberOfRequests(); - /** - * optional uint32 total_number_of_requests = 2; - * - *
-     ** Total Number of requests from the start of the region server. 
-     * 
- */ - int getTotalNumberOfRequests(); - - // optional uint32 used_heap_MB = 3; - /** - * optional uint32 used_heap_MB = 3; - * - *
-     ** the amount of used heap, in MB. 
-     * 
- */ - boolean hasUsedHeapMB(); - /** - * optional uint32 used_heap_MB = 3; - * - *
-     ** the amount of used heap, in MB. 
-     * 
- */ - int getUsedHeapMB(); - - // optional uint32 max_heap_MB = 4; - /** - * optional uint32 max_heap_MB = 4; - * - *
-     ** the maximum allowable size of the heap, in MB. 
-     * 
- */ - boolean hasMaxHeapMB(); - /** - * optional uint32 max_heap_MB = 4; - * - *
-     ** the maximum allowable size of the heap, in MB. 
-     * 
- */ - int getMaxHeapMB(); - - // repeated .RegionLoad region_loads = 5; - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * 
- */ - java.util.List - getRegionLoadsList(); - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * 
- */ - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index); - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * 
- */ - int getRegionLoadsCount(); - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * 
- */ - java.util.List - getRegionLoadsOrBuilderList(); - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * 
- */ - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( - int index); - - // repeated .Coprocessor coprocessors = 6; - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * 
- */ - java.util.List - getCoprocessorsList(); - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * 
- */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index); - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * 
- */ - int getCoprocessorsCount(); - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * 
- */ - java.util.List - getCoprocessorsOrBuilderList(); - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * 
- */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( - int index); - - // optional uint64 report_start_time = 7; - /** - * optional uint64 report_start_time = 7; - * - *
-     **
-     * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * 
- */ - boolean hasReportStartTime(); - /** - * optional uint64 report_start_time = 7; - * - *
-     **
-     * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * 
- */ - long getReportStartTime(); - - // optional uint64 report_end_time = 8; + // required uint64 ageOfLastAppliedOp = 1; /** - * optional uint64 report_end_time = 8; - * - *
-     **
-     * Time when report was generated.
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * 
+ * required uint64 ageOfLastAppliedOp = 1; */ - boolean hasReportEndTime(); + boolean hasAgeOfLastAppliedOp(); /** - * optional uint64 report_end_time = 8; - * - *
-     **
-     * Time when report was generated.
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * 
+ * required uint64 ageOfLastAppliedOp = 1; */ - long getReportEndTime(); + long getAgeOfLastAppliedOp(); - // optional uint32 info_server_port = 9; + // required uint64 timeStampsOfLastAppliedOp = 2; /** - * optional uint32 info_server_port = 9; - * - *
-     **
-     * The port number that this region server is hosing an info server on.
-     * 
+ * required uint64 timeStampsOfLastAppliedOp = 2; */ - boolean hasInfoServerPort(); + boolean hasTimeStampsOfLastAppliedOp(); /** - * optional uint32 info_server_port = 9; - * - *
-     **
-     * The port number that this region server is hosing an info server on.
-     * 
+ * required uint64 timeStampsOfLastAppliedOp = 2; */ - int getInfoServerPort(); + long getTimeStampsOfLastAppliedOp(); } /** - * Protobuf type {@code ServerLoad} + * Protobuf type {@code ReplicationLoadSink} + * + *
+   * Server-level protobufs
+   * 
*/ - public static final class ServerLoad extends + public static final class ReplicationLoadSink extends com.google.protobuf.GeneratedMessage - implements ServerLoadOrBuilder { - // Use ServerLoad.newBuilder() to construct. - private ServerLoad(com.google.protobuf.GeneratedMessage.Builder builder) { + implements ReplicationLoadSinkOrBuilder { + // Use ReplicationLoadSink.newBuilder() to construct. + private ReplicationLoadSink(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ServerLoad(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private ReplicationLoadSink(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ServerLoad defaultInstance; - public static ServerLoad getDefaultInstance() { + private static final ReplicationLoadSink defaultInstance; + public static ReplicationLoadSink getDefaultInstance() { return defaultInstance; } - public ServerLoad getDefaultInstanceForType() { + public ReplicationLoadSink getDefaultInstanceForType() { return defaultInstance; } @@ -4502,7 +4281,7 @@ public final class ClusterStatusProtos { getUnknownFields() { return this.unknownFields; } - private ServerLoad( + private ReplicationLoadSink( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -4527,53 +4306,12 @@ public final class ClusterStatusProtos { } case 8: { bitField0_ |= 0x00000001; - numberOfRequests_ = input.readUInt32(); + ageOfLastAppliedOp_ = input.readUInt64(); break; } case 16: { bitField0_ |= 0x00000002; - totalNumberOfRequests_ = input.readUInt32(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - usedHeapMB_ = input.readUInt32(); - break; - } - case 32: { - bitField0_ |= 0x00000008; - maxHeapMB_ = input.readUInt32(); - break; - } - case 42: { - if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { - regionLoads_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000010; - } - regionLoads_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.PARSER, extensionRegistry)); - break; - } - case 50: { - if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { - coprocessors_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000020; - } - coprocessors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.PARSER, extensionRegistry)); - break; - } - case 56: { - bitField0_ |= 0x00000010; - reportStartTime_ = input.readUInt64(); - break; - } - case 64: { - bitField0_ |= 0x00000020; - reportEndTime_ = input.readUInt64(); - break; - } - case 72: { - bitField0_ |= 0x00000040; - infoServerPort_ = input.readUInt32(); + timeStampsOfLastAppliedOp_ = input.readUInt64(); break; } } @@ -4584,1957 +4322,4484 @@ public final class ClusterStatusProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { - regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_); - } - if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { - coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_); - } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - 
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ServerLoad parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ReplicationLoadSink parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ServerLoad(input, extensionRegistry); + return new ReplicationLoadSink(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // optional uint32 number_of_requests = 1; - public static final int NUMBER_OF_REQUESTS_FIELD_NUMBER = 1; - private int numberOfRequests_; + // required uint64 ageOfLastAppliedOp = 1; + public static final int AGEOFLASTAPPLIEDOP_FIELD_NUMBER = 1; + private long ageOfLastAppliedOp_; /** - * optional uint32 number_of_requests = 1; - * - *
-     ** Number of requests since last report. 
-     * 
+ * required uint64 ageOfLastAppliedOp = 1; */ - public boolean hasNumberOfRequests() { + public boolean hasAgeOfLastAppliedOp() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional uint32 number_of_requests = 1; - * - *
-     ** Number of requests since last report. 
-     * 
+ * required uint64 ageOfLastAppliedOp = 1; */ - public int getNumberOfRequests() { - return numberOfRequests_; + public long getAgeOfLastAppliedOp() { + return ageOfLastAppliedOp_; } - // optional uint32 total_number_of_requests = 2; - public static final int TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER = 2; - private int totalNumberOfRequests_; + // required uint64 timeStampsOfLastAppliedOp = 2; + public static final int TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER = 2; + private long timeStampsOfLastAppliedOp_; /** - * optional uint32 total_number_of_requests = 2; - * - *
-     ** Total Number of requests from the start of the region server. 
-     * 
+ * required uint64 timeStampsOfLastAppliedOp = 2; */ - public boolean hasTotalNumberOfRequests() { + public boolean hasTimeStampsOfLastAppliedOp() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional uint32 total_number_of_requests = 2; - * - *
-     ** Total Number of requests from the start of the region server. 
-     * 
+ * required uint64 timeStampsOfLastAppliedOp = 2; */ - public int getTotalNumberOfRequests() { - return totalNumberOfRequests_; + public long getTimeStampsOfLastAppliedOp() { + return timeStampsOfLastAppliedOp_; } - // optional uint32 used_heap_MB = 3; - public static final int USED_HEAP_MB_FIELD_NUMBER = 3; - private int usedHeapMB_; - /** - * optional uint32 used_heap_MB = 3; - * - *
-     ** the amount of used heap, in MB. 
-     * 
- */ - public boolean hasUsedHeapMB() { - return ((bitField0_ & 0x00000004) == 0x00000004); + private void initFields() { + ageOfLastAppliedOp_ = 0L; + timeStampsOfLastAppliedOp_ = 0L; } - /** - * optional uint32 used_heap_MB = 3; - * - *
-     ** the amount of used heap, in MB. 
-     * 
- */ - public int getUsedHeapMB() { - return usedHeapMB_; + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasAgeOfLastAppliedOp()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTimeStampsOfLastAppliedOp()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; } - // optional uint32 max_heap_MB = 4; - public static final int MAX_HEAP_MB_FIELD_NUMBER = 4; - private int maxHeapMB_; - /** - * optional uint32 max_heap_MB = 4; - * - *
-     ** the maximum allowable size of the heap, in MB. 
-     * 
- */ - public boolean hasMaxHeapMB() { - return ((bitField0_ & 0x00000008) == 0x00000008); + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, ageOfLastAppliedOp_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, timeStampsOfLastAppliedOp_); + } + getUnknownFields().writeTo(output); } - /** - * optional uint32 max_heap_MB = 4; - * - *
-     ** the maximum allowable size of the heap, in MB. 
-     * 
- */ - public int getMaxHeapMB() { - return maxHeapMB_; + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, ageOfLastAppliedOp_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, timeStampsOfLastAppliedOp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; } - // repeated .RegionLoad region_loads = 5; - public static final int REGION_LOADS_FIELD_NUMBER = 5; - private java.util.List regionLoads_; - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * 
- */ - public java.util.List getRegionLoadsList() { - return regionLoads_; + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); } - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * 
- */ - public java.util.List - getRegionLoadsOrBuilderList() { - return regionLoads_; + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) obj; + + boolean result = true; + result = result && (hasAgeOfLastAppliedOp() == other.hasAgeOfLastAppliedOp()); + if (hasAgeOfLastAppliedOp()) { + result = result && (getAgeOfLastAppliedOp() + == other.getAgeOfLastAppliedOp()); + } + result = result && (hasTimeStampsOfLastAppliedOp() == other.hasTimeStampsOfLastAppliedOp()); + if (hasTimeStampsOfLastAppliedOp()) { + result = result && (getTimeStampsOfLastAppliedOp() + == other.getTimeStampsOfLastAppliedOp()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * 
- */ - public int getRegionLoadsCount() { - return regionLoads_.size(); + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasAgeOfLastAppliedOp()) { + hash = (37 * hash) + AGEOFLASTAPPLIEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getAgeOfLastAppliedOp()); + } + if (hasTimeStampsOfLastAppliedOp()) { + hash = (37 * hash) + TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimeStampsOfLastAppliedOp()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; } - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * 
- */ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) { - return regionLoads_.get(index); + + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - /** - * repeated .RegionLoad region_loads = 5; - * - *
-     ** Information on the load of individual regions. 
-     * 
- */ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( - int index) { - return regionLoads_.get(index); + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - - // repeated .Coprocessor coprocessors = 6; - public static final int COPROCESSORS_FIELD_NUMBER = 6; - private java.util.List coprocessors_; - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * 
- */ - public java.util.List getCoprocessorsList() { - return coprocessors_; + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * 
- */ - public java.util.List - getCoprocessorsOrBuilderList() { - return coprocessors_; + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * 
- */ - public int getCoprocessorsCount() { - return coprocessors_.size(); + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); } - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * 
- */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) { - return coprocessors_.get(index); + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); } - /** - * repeated .Coprocessor coprocessors = 6; - * - *
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * 
- */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( - int index) { - return coprocessors_.get(index); + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); } - - // optional uint64 report_start_time = 7; - public static final int REPORT_START_TIME_FIELD_NUMBER = 7; - private long reportStartTime_; - /** - * optional uint64 report_start_time = 7; - * - *
-     **
-     * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * 
- */ - public boolean hasReportStartTime() { - return ((bitField0_ & 0x00000010) == 0x00000010); + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); } - /** - * optional uint64 report_start_time = 7; - * - *
-     **
-     * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * 
- */ - public long getReportStartTime() { - return reportStartTime_; + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); } - - // optional uint64 report_end_time = 8; - public static final int REPORT_END_TIME_FIELD_NUMBER = 8; - private long reportEndTime_; - /** - * optional uint64 report_end_time = 8; - * - *
-     **
-     * Time when report was generated.
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * 
- */ - public boolean hasReportEndTime() { - return ((bitField0_ & 0x00000020) == 0x00000020); + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); } - /** - * optional uint64 report_end_time = 8; - * - *
-     **
-     * Time when report was generated.
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * 
- */ - public long getReportEndTime() { - return reportEndTime_; + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink prototype) { + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } - // optional uint32 info_server_port = 9; - public static final int INFO_SERVER_PORT_FIELD_NUMBER = 9; - private int infoServerPort_; - /** - * optional uint32 info_server_port = 9; - * - *
-     **
-     * The port number that this region server is hosing an info server on.
-     * 
- */ - public boolean hasInfoServerPort() { - return ((bitField0_ & 0x00000040) == 0x00000040); + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; } /** - * optional uint32 info_server_port = 9; + * Protobuf type {@code ReplicationLoadSink} * *
-     **
-     * The port number that this region server is hosing an info server on.
+     * Server-level protobufs
      * 
*/ - public int getInfoServerPort() { - return infoServerPort_; - } - - private void initFields() { - numberOfRequests_ = 0; - totalNumberOfRequests_ = 0; - usedHeapMB_ = 0; - maxHeapMB_ = 0; - regionLoads_ = java.util.Collections.emptyList(); - coprocessors_ = java.util.Collections.emptyList(); - reportStartTime_ = 0L; - reportEndTime_ = 0L; - infoServerPort_ = 0; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - for (int i = 0; i < getRegionLoadsCount(); i++) { - if (!getRegionLoads(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getCoprocessorsCount(); i++) { - if (!getCoprocessors(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor; } - memoizedIsInitialized = 1; - return true; - } - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt32(1, numberOfRequests_); + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt32(2, totalNumberOfRequests_); + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt32(3, usedHeapMB_); + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt32(4, maxHeapMB_); + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } } - for (int i = 0; i < regionLoads_.size(); i++) { - output.writeMessage(5, regionLoads_.get(i)); + private static Builder create() { + return new Builder(); } - for (int i = 0; i < coprocessors_.size(); i++) { - output.writeMessage(6, coprocessors_.get(i)); + + public Builder clear() { + super.clear(); + ageOfLastAppliedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + timeStampsOfLastAppliedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeUInt64(7, reportStartTime_); + + public Builder clone() { + return create().mergeFrom(buildPartial()); } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeUInt64(8, reportEndTime_); + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor; } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeUInt32(9, infoServerPort_); + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(1, numberOfRequests_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(2, totalNumberOfRequests_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(3, usedHeapMB_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(4, maxHeapMB_); - } - for (int i = 0; i < regionLoads_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(5, regionLoads_.get(i)); - } - for (int i = 0; i < coprocessors_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(6, coprocessors_.get(i)); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(7, reportStartTime_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(8, reportEndTime_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(9, infoServerPort_); + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad)) { - return super.equals(obj); + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.ageOfLastAppliedOp_ = ageOfLastAppliedOp_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.timeStampsOfLastAppliedOp_ = timeStampsOfLastAppliedOp_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; } - 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) obj; - boolean result = true; - result = result && (hasNumberOfRequests() == other.hasNumberOfRequests()); - if (hasNumberOfRequests()) { - result = result && (getNumberOfRequests() - == other.getNumberOfRequests()); - } - result = result && (hasTotalNumberOfRequests() == other.hasTotalNumberOfRequests()); - if (hasTotalNumberOfRequests()) { - result = result && (getTotalNumberOfRequests() - == other.getTotalNumberOfRequests()); - } - result = result && (hasUsedHeapMB() == other.hasUsedHeapMB()); - if (hasUsedHeapMB()) { - result = result && (getUsedHeapMB() - == other.getUsedHeapMB()); - } - result = result && (hasMaxHeapMB() == other.hasMaxHeapMB()); - if (hasMaxHeapMB()) { - result = result && (getMaxHeapMB() - == other.getMaxHeapMB()); - } - result = result && getRegionLoadsList() - .equals(other.getRegionLoadsList()); - result = result && getCoprocessorsList() - .equals(other.getCoprocessorsList()); - result = result && (hasReportStartTime() == other.hasReportStartTime()); - if (hasReportStartTime()) { - result = result && (getReportStartTime() - == other.getReportStartTime()); - } - result = result && (hasReportEndTime() == other.hasReportEndTime()); - if (hasReportEndTime()) { - result = result && (getReportEndTime() - == other.getReportEndTime()); + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)other); + } else { + super.mergeFrom(other); + return this; + } } - result = result && (hasInfoServerPort() == other.hasInfoServerPort()); - if (hasInfoServerPort()) { - result = result && (getInfoServerPort() - == other.getInfoServerPort()); + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance()) return this; + if (other.hasAgeOfLastAppliedOp()) { + setAgeOfLastAppliedOp(other.getAgeOfLastAppliedOp()); + } + if (other.hasTimeStampsOfLastAppliedOp()) { + setTimeStampsOfLastAppliedOp(other.getTimeStampsOfLastAppliedOp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; + public final boolean isInitialized() { + if (!hasAgeOfLastAppliedOp()) { + + return false; + } + if (!hasTimeStampsOfLastAppliedOp()) { + + return false; + } + return true; } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasNumberOfRequests()) { - hash = (37 * hash) + NUMBER_OF_REQUESTS_FIELD_NUMBER; - hash = (53 * hash) + getNumberOfRequests(); + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + 
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; } - if (hasTotalNumberOfRequests()) { - hash = (37 * hash) + TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER; - hash = (53 * hash) + getTotalNumberOfRequests(); + private int bitField0_; + + // required uint64 ageOfLastAppliedOp = 1; + private long ageOfLastAppliedOp_ ; + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public boolean hasAgeOfLastAppliedOp() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - if (hasUsedHeapMB()) { - hash = (37 * hash) + USED_HEAP_MB_FIELD_NUMBER; - hash = (53 * hash) + getUsedHeapMB(); + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public long getAgeOfLastAppliedOp() { + return ageOfLastAppliedOp_; } - if (hasMaxHeapMB()) { - hash = (37 * hash) + MAX_HEAP_MB_FIELD_NUMBER; - hash = (53 * hash) + getMaxHeapMB(); + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public Builder setAgeOfLastAppliedOp(long value) { + bitField0_ |= 0x00000001; + ageOfLastAppliedOp_ = value; + onChanged(); + return this; } - if (getRegionLoadsCount() > 0) { - hash = (37 * hash) + REGION_LOADS_FIELD_NUMBER; - hash = (53 * hash) + getRegionLoadsList().hashCode(); + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public Builder clearAgeOfLastAppliedOp() { + bitField0_ = (bitField0_ & ~0x00000001); + ageOfLastAppliedOp_ = 0L; + onChanged(); + return this; } - if (getCoprocessorsCount() > 0) { - hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER; - hash = (53 * hash) + getCoprocessorsList().hashCode(); + + // required uint64 timeStampsOfLastAppliedOp = 2; + private long timeStampsOfLastAppliedOp_ ; + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public boolean hasTimeStampsOfLastAppliedOp() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - if (hasReportStartTime()) { - hash = (37 * hash) + REPORT_START_TIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getReportStartTime()); + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public long getTimeStampsOfLastAppliedOp() { + return timeStampsOfLastAppliedOp_; } - if (hasReportEndTime()) { - hash = (37 * hash) + REPORT_END_TIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getReportEndTime()); + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public Builder setTimeStampsOfLastAppliedOp(long value) { + bitField0_ |= 0x00000002; + timeStampsOfLastAppliedOp_ = value; + onChanged(); + return this; } - if (hasInfoServerPort()) { - hash = (37 * hash) + INFO_SERVER_PORT_FIELD_NUMBER; - hash = (53 * hash) + getInfoServerPort(); + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public Builder clearTimeStampsOfLastAppliedOp() { + bitField0_ = (bitField0_ & ~0x00000002); + timeStampsOfLastAppliedOp_ = 0L; + onChanged(); + return this; } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - 
return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); + // @@protoc_insertion_point(builder_scope:ReplicationLoadSink) } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); + + static { + defaultInstance = new ReplicationLoadSink(true); + defaultInstance.initFields(); } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); + + // @@protoc_insertion_point(class_scope:ReplicationLoadSink) + } + + public interface ReplicationLoadSourceOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string peerID = 1; + /** + * required string peerID = 1; + */ + boolean hasPeerID(); + /** + * required string peerID = 1; + */ + java.lang.String getPeerID(); + /** + * required string peerID = 1; + */ + com.google.protobuf.ByteString + getPeerIDBytes(); + + // required uint64 ageOfLastShippedOp = 2; + /** + * required uint64 ageOfLastShippedOp = 2; + */ + boolean hasAgeOfLastShippedOp(); + /** + * required uint64 ageOfLastShippedOp = 2; + */ + long getAgeOfLastShippedOp(); + + // required uint32 sizeOfLogQueue = 3; + /** + * required uint32 sizeOfLogQueue = 3; + */ + boolean hasSizeOfLogQueue(); + /** + * required uint32 sizeOfLogQueue = 3; + */ + int getSizeOfLogQueue(); + + // required uint64 timeStampOfLastShippedOp = 4; + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + boolean hasTimeStampOfLastShippedOp(); + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + long getTimeStampOfLastShippedOp(); + + // required uint64 replicationLag = 5; + /** + * required uint64 replicationLag = 5; + */ + boolean hasReplicationLag(); + /** + * required uint64 replicationLag = 5; + */ + long getReplicationLag(); + } + /** + * Protobuf type {@code ReplicationLoadSource} + */ + public static final class ReplicationLoadSource extends + com.google.protobuf.GeneratedMessage + implements ReplicationLoadSourceOrBuilder { + // Use ReplicationLoadSource.newBuilder() to construct. 
+ private ReplicationLoadSource(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); + private ReplicationLoadSource(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ReplicationLoadSource defaultInstance; + public static ReplicationLoadSource getDefaultInstance() { + return defaultInstance; } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); + + public ReplicationLoadSource getDefaultInstanceForType() { + return defaultInstance; } - public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ReplicationLoadSource( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + peerID_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + ageOfLastShippedOp_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + sizeOfLogQueue_ = input.readUInt32(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + timeStampOfLastShippedOp_ = input.readUInt64(); + break; + } + case 40: { + bitField0_ |= 0x00000010; + replicationLag_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor; } - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad prototype) { - return newBuilder().mergeFrom(prototype); + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class); } - public Builder toBuilder() { return newBuilder(this); } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ReplicationLoadSource parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReplicationLoadSource(input, extensionRegistry); + } + }; @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; + public com.google.protobuf.Parser getParserForType() { + return PARSER; } + + private int bitField0_; + // required string peerID = 1; + public static final int PEERID_FIELD_NUMBER = 1; + private java.lang.Object peerID_; /** - * Protobuf type {@code ServerLoad} + * required string peerID = 1; */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor; + public boolean hasPeerID() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string peerID = 1; + */ + public java.lang.String getPeerID() { + java.lang.Object ref = peerID_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + peerID_ = s; + } + return s; } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder.class); + } + /** + * required string peerID = 1; + */ + public com.google.protobuf.ByteString + getPeerIDBytes() { + java.lang.Object ref = peerID_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + peerID_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } + } - // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } + // required uint64 ageOfLastShippedOp = 2; + public static final int AGEOFLASTSHIPPEDOP_FIELD_NUMBER = 2; + private long ageOfLastShippedOp_; + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public boolean hasAgeOfLastShippedOp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public long getAgeOfLastShippedOp() { + return ageOfLastShippedOp_; + } - 
private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegionLoadsFieldBuilder(); - getCoprocessorsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } + // required uint32 sizeOfLogQueue = 3; + public static final int SIZEOFLOGQUEUE_FIELD_NUMBER = 3; + private int sizeOfLogQueue_; + /** + * required uint32 sizeOfLogQueue = 3; + */ + public boolean hasSizeOfLogQueue() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 sizeOfLogQueue = 3; + */ + public int getSizeOfLogQueue() { + return sizeOfLogQueue_; + } - public Builder clear() { - super.clear(); - numberOfRequests_ = 0; - bitField0_ = (bitField0_ & ~0x00000001); - totalNumberOfRequests_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - usedHeapMB_ = 0; - bitField0_ = (bitField0_ & ~0x00000004); - maxHeapMB_ = 0; - bitField0_ = (bitField0_ & ~0x00000008); - if (regionLoadsBuilder_ == null) { - regionLoads_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000010); - } else { - regionLoadsBuilder_.clear(); - } - if (coprocessorsBuilder_ == null) { - coprocessors_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000020); - } else { - coprocessorsBuilder_.clear(); - } - reportStartTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000040); - reportEndTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000080); - infoServerPort_ = 0; - bitField0_ = (bitField0_ & ~0x00000100); - return this; - } + // required uint64 timeStampOfLastShippedOp = 4; + public static final int TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER = 4; + private long timeStampOfLastShippedOp_; + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public boolean hasTimeStampOfLastShippedOp() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public long getTimeStampOfLastShippedOp() { + return timeStampOfLastShippedOp_; + } - public Builder clone() { - return create().mergeFrom(buildPartial()); - } + // required uint64 replicationLag = 5; + public static final int REPLICATIONLAG_FIELD_NUMBER = 5; + private long replicationLag_; + /** + * required uint64 replicationLag = 5; + */ + public boolean hasReplicationLag() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 replicationLag = 5; + */ + public long getReplicationLag() { + return replicationLag_; + } - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor; - } + private void initFields() { + peerID_ = ""; + ageOfLastShippedOp_ = 0L; + sizeOfLogQueue_ = 0; + timeStampOfLastShippedOp_ = 0L; + replicationLag_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance(); + if (!hasPeerID()) { + memoizedIsInitialized = 0; + return false; } - - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad build() { - 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad result = buildPartial(); - if (!result.isInitialized()) { + if (!hasAgeOfLastShippedOp()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSizeOfLogQueue()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTimeStampOfLastShippedOp()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasReplicationLag()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getPeerIDBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, ageOfLastShippedOp_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt32(3, sizeOfLogQueue_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(4, timeStampOfLastShippedOp_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt64(5, replicationLag_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getPeerIDBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, ageOfLastShippedOp_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(3, sizeOfLogQueue_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, timeStampOfLastShippedOp_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(5, replicationLag_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) obj; + + boolean result = true; + result = result && (hasPeerID() == other.hasPeerID()); + if (hasPeerID()) { + result = result && getPeerID() + .equals(other.getPeerID()); + } + result = result && (hasAgeOfLastShippedOp() == other.hasAgeOfLastShippedOp()); + if (hasAgeOfLastShippedOp()) { + result = result && (getAgeOfLastShippedOp() + == other.getAgeOfLastShippedOp()); + } + result = result && (hasSizeOfLogQueue() == other.hasSizeOfLogQueue()); + if (hasSizeOfLogQueue()) { + result = result && (getSizeOfLogQueue() + == other.getSizeOfLogQueue()); + } + result = result && (hasTimeStampOfLastShippedOp() == other.hasTimeStampOfLastShippedOp()); + if (hasTimeStampOfLastShippedOp()) { + result = result && 
(getTimeStampOfLastShippedOp() + == other.getTimeStampOfLastShippedOp()); + } + result = result && (hasReplicationLag() == other.hasReplicationLag()); + if (hasReplicationLag()) { + result = result && (getReplicationLag() + == other.getReplicationLag()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasPeerID()) { + hash = (37 * hash) + PEERID_FIELD_NUMBER; + hash = (53 * hash) + getPeerID().hashCode(); + } + if (hasAgeOfLastShippedOp()) { + hash = (37 * hash) + AGEOFLASTSHIPPEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getAgeOfLastShippedOp()); + } + if (hasSizeOfLogQueue()) { + hash = (37 * hash) + SIZEOFLOGQUEUE_FIELD_NUMBER; + hash = (53 * hash) + getSizeOfLogQueue(); + } + if (hasTimeStampOfLastShippedOp()) { + hash = (37 * hash) + TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimeStampOfLastShippedOp()); + } + if (hasReplicationLag()) { + hash = (37 * hash) + REPLICATIONLAG_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getReplicationLag()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ReplicationLoadSource} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + peerID_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + ageOfLastShippedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + sizeOfLogQueue_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + timeStampOfLastShippedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + replicationLag_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance(); + } + + public 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = buildPartial(); + if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad(this); + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.numberOfRequests_ = numberOfRequests_; + result.peerID_ = peerID_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.totalNumberOfRequests_ = totalNumberOfRequests_; + result.ageOfLastShippedOp_ = ageOfLastShippedOp_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } - result.usedHeapMB_ = usedHeapMB_; + result.sizeOfLogQueue_ = sizeOfLogQueue_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } - result.maxHeapMB_ = maxHeapMB_; + result.timeStampOfLastShippedOp_ = timeStampOfLastShippedOp_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.replicationLag_ = replicationLag_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()) return this; + if (other.hasPeerID()) { + bitField0_ |= 0x00000001; + peerID_ = other.peerID_; + onChanged(); + } + if (other.hasAgeOfLastShippedOp()) { + setAgeOfLastShippedOp(other.getAgeOfLastShippedOp()); + } + if (other.hasSizeOfLogQueue()) { + setSizeOfLogQueue(other.getSizeOfLogQueue()); + } + if (other.hasTimeStampOfLastShippedOp()) { + setTimeStampOfLastShippedOp(other.getTimeStampOfLastShippedOp()); + } + if (other.hasReplicationLag()) { + setReplicationLag(other.getReplicationLag()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasPeerID()) { + + return false; + } + if (!hasAgeOfLastShippedOp()) { + + return false; + } + if (!hasSizeOfLogQueue()) { + + return false; + } + if (!hasTimeStampOfLastShippedOp()) { + + return false; + } + if (!hasReplicationLag()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string peerID = 1; + private java.lang.Object peerID_ = ""; + /** + * required string peerID = 1; + */ + public boolean hasPeerID() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string peerID = 1; + */ + public java.lang.String getPeerID() { + java.lang.Object ref = peerID_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + peerID_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string peerID = 1; + */ + public com.google.protobuf.ByteString + getPeerIDBytes() { + java.lang.Object ref = peerID_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + peerID_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string peerID = 1; + */ + public Builder setPeerID( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + peerID_ = value; + onChanged(); + return this; + } + /** + * required string peerID = 1; + */ + public Builder clearPeerID() { + bitField0_ = (bitField0_ & ~0x00000001); + peerID_ = getDefaultInstance().getPeerID(); + onChanged(); + return this; + } + /** + * required string peerID = 1; + */ + public Builder setPeerIDBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + peerID_ = value; + onChanged(); + return this; + } + + // required uint64 ageOfLastShippedOp = 2; + private long ageOfLastShippedOp_ ; + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public boolean hasAgeOfLastShippedOp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public long getAgeOfLastShippedOp() { + return ageOfLastShippedOp_; + } + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public Builder setAgeOfLastShippedOp(long value) { + bitField0_ |= 0x00000002; + ageOfLastShippedOp_ = value; + onChanged(); + return this; + } + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public Builder clearAgeOfLastShippedOp() { + bitField0_ = (bitField0_ & ~0x00000002); + ageOfLastShippedOp_ = 0L; + onChanged(); + return this; + } + + // required uint32 sizeOfLogQueue = 3; + private int sizeOfLogQueue_ ; + /** + * required uint32 sizeOfLogQueue = 3; + */ + public boolean hasSizeOfLogQueue() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 sizeOfLogQueue = 3; + */ + public int getSizeOfLogQueue() { + return sizeOfLogQueue_; + } + /** + * required uint32 sizeOfLogQueue = 3; + */ + public Builder setSizeOfLogQueue(int value) { + bitField0_ |= 0x00000004; + sizeOfLogQueue_ = value; + onChanged(); + return this; + } + /** + * required uint32 sizeOfLogQueue = 3; + */ + public Builder clearSizeOfLogQueue() { + bitField0_ = (bitField0_ & 
~0x00000004); + sizeOfLogQueue_ = 0; + onChanged(); + return this; + } + + // required uint64 timeStampOfLastShippedOp = 4; + private long timeStampOfLastShippedOp_ ; + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public boolean hasTimeStampOfLastShippedOp() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public long getTimeStampOfLastShippedOp() { + return timeStampOfLastShippedOp_; + } + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public Builder setTimeStampOfLastShippedOp(long value) { + bitField0_ |= 0x00000008; + timeStampOfLastShippedOp_ = value; + onChanged(); + return this; + } + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public Builder clearTimeStampOfLastShippedOp() { + bitField0_ = (bitField0_ & ~0x00000008); + timeStampOfLastShippedOp_ = 0L; + onChanged(); + return this; + } + + // required uint64 replicationLag = 5; + private long replicationLag_ ; + /** + * required uint64 replicationLag = 5; + */ + public boolean hasReplicationLag() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 replicationLag = 5; + */ + public long getReplicationLag() { + return replicationLag_; + } + /** + * required uint64 replicationLag = 5; + */ + public Builder setReplicationLag(long value) { + bitField0_ |= 0x00000010; + replicationLag_ = value; + onChanged(); + return this; + } + /** + * required uint64 replicationLag = 5; + */ + public Builder clearReplicationLag() { + bitField0_ = (bitField0_ & ~0x00000010); + replicationLag_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ReplicationLoadSource) + } + + static { + defaultInstance = new ReplicationLoadSource(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ReplicationLoadSource) + } + + public interface ServerLoadOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional uint32 number_of_requests = 1; + /** + * optional uint32 number_of_requests = 1; + * + *
+     ** Number of requests since last report. 
+     * 
+ */ + boolean hasNumberOfRequests(); + /** + * optional uint32 number_of_requests = 1; + * + *
+     ** Number of requests since last report. 
+     * 
+ */ + int getNumberOfRequests(); + + // optional uint32 total_number_of_requests = 2; + /** + * optional uint32 total_number_of_requests = 2; + * + *
+     ** Total number of requests from the start of the region server. 
+     * 
+ */ + boolean hasTotalNumberOfRequests(); + /** + * optional uint32 total_number_of_requests = 2; + * + *
+     ** Total number of requests from the start of the region server. 
+     * 
+ */ + int getTotalNumberOfRequests(); + + // optional uint32 used_heap_MB = 3; + /** + * optional uint32 used_heap_MB = 3; + * + *
+     ** The amount of used heap, in MB. 
+     * 
+ */ + boolean hasUsedHeapMB(); + /** + * optional uint32 used_heap_MB = 3; + * + *
+     ** The amount of used heap, in MB. 
+     * 
+ */ + int getUsedHeapMB(); + + // optional uint32 max_heap_MB = 4; + /** + * optional uint32 max_heap_MB = 4; + * + *
+     ** The maximum allowable size of the heap, in MB. 
+     * 
+ */ + boolean hasMaxHeapMB(); + /** + * optional uint32 max_heap_MB = 4; + * + *
+     ** The maximum allowable size of the heap, in MB. 
+     * 
+ */ + int getMaxHeapMB(); + + // repeated .RegionLoad region_loads = 5; + /** + * repeated .RegionLoad region_loads = 5; + * + *
+     ** Information on the load of individual regions. 
+     * 
+ */ + java.util.List + getRegionLoadsList(); + /** + * repeated .RegionLoad region_loads = 5; + * + *
+     ** Information on the load of individual regions. 
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index); + /** + * repeated .RegionLoad region_loads = 5; + * + *
+     ** Information on the load of individual regions. 
+     * 
+ */ + int getRegionLoadsCount(); + /** + * repeated .RegionLoad region_loads = 5; + * + *
+     ** Information on the load of individual regions. 
+     * 
+ */ + java.util.List + getRegionLoadsOrBuilderList(); + /** + * repeated .RegionLoad region_loads = 5; + * + *
+     ** Information on the load of individual regions. 
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( + int index); + + // repeated .Coprocessor coprocessors = 6; + /** + * repeated .Coprocessor coprocessors = 6; + * + *
+     **
+     * Regionserver-level coprocessors, e.g., WALObserver implementations.
+     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+     * objects.
+     * 
+ */ + java.util.List + getCoprocessorsList(); + /** + * repeated .Coprocessor coprocessors = 6; + * + *
+     **
+     * Regionserver-level coprocessors, e.g., WALObserver implementations.
+     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+     * objects.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index); + /** + * repeated .Coprocessor coprocessors = 6; + * + *
+     **
+     * Regionserver-level coprocessors, e.g., WALObserver implementations.
+     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+     * objects.
+     * 
+ */ + int getCoprocessorsCount(); + /** + * repeated .Coprocessor coprocessors = 6; + * + *
+     **
+     * Regionserver-level coprocessors, e.g., WALObserver implementations.
+     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+     * objects.
+     * 
+ */ + java.util.List + getCoprocessorsOrBuilderList(); + /** + * repeated .Coprocessor coprocessors = 6; + * + *
+     **
+     * Regionserver-level coprocessors, e.g., WALObserver implementations.
+     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+     * objects.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( + int index); + + // optional uint64 report_start_time = 7; + /** + * optional uint64 report_start_time = 7; + * + *
+     **
+     * Time when incremental (non-total) counts began being calculated (e.g., number_of_requests).
+     * Time is measured as the difference, measured in milliseconds, between the current time
+     * and midnight, January 1, 1970 UTC.
+     * 
+ */ + boolean hasReportStartTime(); + /** + * optional uint64 report_start_time = 7; + * + *
+     **
+     * Time when incremental (non-total) counts began being calculated (e.g., number_of_requests).
+     * Time is measured as the difference, measured in milliseconds, between the current time
+     * and midnight, January 1, 1970 UTC.
+     * 
+ */ + long getReportStartTime(); + + // optional uint64 report_end_time = 8; + /** + * optional uint64 report_end_time = 8; + * + *
+     **
+     * Time when report was generated.
+     * Time is measured as the difference, measured in milliseconds, between the current time
+     * and midnight, January 1, 1970 UTC.
+     * 
+ */ + boolean hasReportEndTime(); + /** + * optional uint64 report_end_time = 8; + * + *
+     **
+     * Time when report was generated.
+     * Time is measured as the difference, measured in milliseconds, between the current time
+     * and midnight, January 1, 1970 UTC.
+     * 
+ */ + long getReportEndTime(); + + // optional uint32 info_server_port = 9; + /** + * optional uint32 info_server_port = 9; + * + *
+     **
+     * The port number that this region server is hosting an info server on.
+     * 
+ */ + boolean hasInfoServerPort(); + /** + * optional uint32 info_server_port = 9; + * + *
+     **
+     * The port number that this region server is hosting an info server on.
+     * 
+ */ + int getInfoServerPort(); + + // repeated .ReplicationLoadSource replLoadSource = 10; + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication source status of this region server.
+     * 
+ */ + java.util.List + getReplLoadSourceList(); + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication source status of this region server.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index); + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication source status of this region server.
+     * 
+ */ + int getReplLoadSourceCount(); + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication source status of this region server.
+     * 
+ */ + java.util.List + getReplLoadSourceOrBuilderList(); + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication source status of this region server.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder( + int index); + + // optional .ReplicationLoadSink replLoadSink = 11; + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+     **
+     * The replicationLoadSink for the replication sink status of this region server.
+     * 
+ */ + boolean hasReplLoadSink(); + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+     **
+     * The replicationLoadSink for the replication sink status of this region server.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink(); + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+     **
+     * The replicationLoadSink for the replication sink status of this region server.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder(); + } + /** + * Protobuf type {@code ServerLoad} + */ + public static final class ServerLoad extends + com.google.protobuf.GeneratedMessage + implements ServerLoadOrBuilder { + // Use ServerLoad.newBuilder() to construct. + private ServerLoad(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ServerLoad(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ServerLoad defaultInstance; + public static ServerLoad getDefaultInstance() { + return defaultInstance; + } + + public ServerLoad getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ServerLoad( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + numberOfRequests_ = input.readUInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + totalNumberOfRequests_ = input.readUInt32(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + usedHeapMB_ = input.readUInt32(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + maxHeapMB_ = input.readUInt32(); + break; + } + case 42: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + regionLoads_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000010; + } + regionLoads_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.PARSER, extensionRegistry)); + break; + } + case 50: { + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + coprocessors_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000020; + } + coprocessors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.PARSER, extensionRegistry)); + break; + } + case 56: { + bitField0_ |= 0x00000010; + reportStartTime_ = input.readUInt64(); + break; + } + case 64: { + bitField0_ |= 0x00000020; + reportEndTime_ = input.readUInt64(); + break; + } + case 72: { + bitField0_ |= 0x00000040; + infoServerPort_ = input.readUInt32(); + break; + } + case 82: { + if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + replLoadSource_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000200; + } + replLoadSource_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.PARSER, extensionRegistry)); + break; + } + case 90: { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder subBuilder = null; + if (((bitField0_ & 0x00000080) == 0x00000080)) { + subBuilder = replLoadSink_.toBuilder(); + } + replLoadSink_ = 
input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(replLoadSink_); + replLoadSink_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000080; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_); + } + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_); + } + if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + replLoadSource_ = java.util.Collections.unmodifiableList(replLoadSource_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ServerLoad parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ServerLoad(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional uint32 number_of_requests = 1; + public static final int NUMBER_OF_REQUESTS_FIELD_NUMBER = 1; + private int numberOfRequests_; + /** + * optional uint32 number_of_requests = 1; + * + *
+     ** Number of requests since last report. 
+     * 
+ */ + public boolean hasNumberOfRequests() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint32 number_of_requests = 1; + * + *
+     ** Number of requests since last report. 
+     * 
+ */ + public int getNumberOfRequests() { + return numberOfRequests_; + } + + // optional uint32 total_number_of_requests = 2; + public static final int TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER = 2; + private int totalNumberOfRequests_; + /** + * optional uint32 total_number_of_requests = 2; + * + *
+     ** Total number of requests from the start of the region server. 
+     * 
+ */ + public boolean hasTotalNumberOfRequests() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint32 total_number_of_requests = 2; + * + *
+     ** Total number of requests from the start of the region server. 
+     * 
+ */ + public int getTotalNumberOfRequests() { + return totalNumberOfRequests_; + } + + // optional uint32 used_heap_MB = 3; + public static final int USED_HEAP_MB_FIELD_NUMBER = 3; + private int usedHeapMB_; + /** + * optional uint32 used_heap_MB = 3; + * + *
+     ** The amount of used heap, in MB. 
+     * 
+ */ + public boolean hasUsedHeapMB() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint32 used_heap_MB = 3; + * + *
+     ** The amount of used heap, in MB. 
+     * 
+ */ + public int getUsedHeapMB() { + return usedHeapMB_; + } + + // optional uint32 max_heap_MB = 4; + public static final int MAX_HEAP_MB_FIELD_NUMBER = 4; + private int maxHeapMB_; + /** + * optional uint32 max_heap_MB = 4; + * + *
+     ** The maximum allowable size of the heap, in MB. 
+     * 
+ */ + public boolean hasMaxHeapMB() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional uint32 max_heap_MB = 4; + * + *
+     ** The maximum allowable size of the heap, in MB. 
+     * 
+ */ + public int getMaxHeapMB() { + return maxHeapMB_; + } + + // repeated .RegionLoad region_loads = 5; + public static final int REGION_LOADS_FIELD_NUMBER = 5; + private java.util.List regionLoads_; + /** + * repeated .RegionLoad region_loads = 5; + * + *
+     ** Information on the load of individual regions. 
+     * 
+ */ + public java.util.List getRegionLoadsList() { + return regionLoads_; + } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+     ** Information on the load of individual regions. 
+     * 
+ */ + public java.util.List + getRegionLoadsOrBuilderList() { + return regionLoads_; + } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+     ** Information on the load of individual regions. 
+     * 
+ */ + public int getRegionLoadsCount() { + return regionLoads_.size(); + } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+     ** Information on the load of individual regions. 
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) { + return regionLoads_.get(index); + } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+     ** Information on the load of individual regions. 
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( + int index) { + return regionLoads_.get(index); + } + + // repeated .Coprocessor coprocessors = 6; + public static final int COPROCESSORS_FIELD_NUMBER = 6; + private java.util.List coprocessors_; + /** + * repeated .Coprocessor coprocessors = 6; + * + *
+     **
+     * Regionserver-level coprocessors, e.g., WALObserver implementations.
+     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+     * objects.
+     * 
+ */ + public java.util.List getCoprocessorsList() { + return coprocessors_; + } + /** + * repeated .Coprocessor coprocessors = 6; + * + *
+     **
+     * Regionserver-level coprocessors, e.g., WALObserver implementations.
+     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+     * objects.
+     * 
+ */ + public java.util.List + getCoprocessorsOrBuilderList() { + return coprocessors_; + } + /** + * repeated .Coprocessor coprocessors = 6; + * + *
+     **
+     * Regionserver-level coprocessors, e.g., WALObserver implementations.
+     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+     * objects.
+     * 
+ */ + public int getCoprocessorsCount() { + return coprocessors_.size(); + } + /** + * repeated .Coprocessor coprocessors = 6; + * + *
+     **
+     * Regionserver-level coprocessors, e.g., WALObserver implementations.
+     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+     * objects.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) { + return coprocessors_.get(index); + } + /** + * repeated .Coprocessor coprocessors = 6; + * + *
+     **
+     * Regionserver-level coprocessors, e.g., WALObserver implementations.
+     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+     * objects.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( + int index) { + return coprocessors_.get(index); + } + + // optional uint64 report_start_time = 7; + public static final int REPORT_START_TIME_FIELD_NUMBER = 7; + private long reportStartTime_; + /** + * optional uint64 report_start_time = 7; + * + *
+     **
+     * Time when incremental (non-total) counts began being calculated (e.g., number_of_requests).
+     * Time is measured as the difference, measured in milliseconds, between the current time
+     * and midnight, January 1, 1970 UTC.
+     * 
+ */ + public boolean hasReportStartTime() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional uint64 report_start_time = 7; + * + *
+     **
+     * Time when incremental (non-total) counts began being calculated (e.g., number_of_requests).
+     * Time is measured as the difference, measured in milliseconds, between the current time
+     * and midnight, January 1, 1970 UTC.
+     * 
+ */ + public long getReportStartTime() { + return reportStartTime_; + } + + // optional uint64 report_end_time = 8; + public static final int REPORT_END_TIME_FIELD_NUMBER = 8; + private long reportEndTime_; + /** + * optional uint64 report_end_time = 8; + * + *
+     **
+     * Time when report was generated.
+     * Time is measured as the difference, measured in milliseconds, between the current time
+     * and midnight, January 1, 1970 UTC.
+     * 
+ */ + public boolean hasReportEndTime() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional uint64 report_end_time = 8; + * + *
+     **
+     * Time when report was generated.
+     * Time is measured as the difference, measured in milliseconds, between the current time
+     * and midnight, January 1, 1970 UTC.
+     * 
+ */ + public long getReportEndTime() { + return reportEndTime_; + } + + // optional uint32 info_server_port = 9; + public static final int INFO_SERVER_PORT_FIELD_NUMBER = 9; + private int infoServerPort_; + /** + * optional uint32 info_server_port = 9; + * + *
+     **
+     * The port number that this region server is hosting an info server on.
+     * 
+ */ + public boolean hasInfoServerPort() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional uint32 info_server_port = 9; + * + *
+     **
+     * The port number that this region server is hosting an info server on.
+     * 
+ */ + public int getInfoServerPort() { + return infoServerPort_; + } + + // repeated .ReplicationLoadSource replLoadSource = 10; + public static final int REPLLOADSOURCE_FIELD_NUMBER = 10; + private java.util.List replLoadSource_; + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication source status of this region server.
+     * 
+ */ + public java.util.List getReplLoadSourceList() { + return replLoadSource_; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication source status of this region server.
+     * 
+ */ + public java.util.List + getReplLoadSourceOrBuilderList() { + return replLoadSource_; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication source status of this region server.
+     * 
+ */ + public int getReplLoadSourceCount() { + return replLoadSource_.size(); + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication source status of this region server.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index) { + return replLoadSource_.get(index); + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
+     **
+     * The replicationLoadSource for the replication source status of this region server.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder( + int index) { + return replLoadSource_.get(index); + } + + // optional .ReplicationLoadSink replLoadSink = 11; + public static final int REPLLOADSINK_FIELD_NUMBER = 11; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink replLoadSink_; + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+     **
+     * The replicationLoadSink for the replication sink status of this region server.
+     * 
+ */ + public boolean hasReplLoadSink() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+     **
+     * The replicationLoadSink for the replication sink status of this region server.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink() { + return replLoadSink_; + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
+     **
+     * The replicationLoadSink for the replication sink status of this region server.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder() { + return replLoadSink_; + } + + private void initFields() { + numberOfRequests_ = 0; + totalNumberOfRequests_ = 0; + usedHeapMB_ = 0; + maxHeapMB_ = 0; + regionLoads_ = java.util.Collections.emptyList(); + coprocessors_ = java.util.Collections.emptyList(); + reportStartTime_ = 0L; + reportEndTime_ = 0L; + infoServerPort_ = 0; + replLoadSource_ = java.util.Collections.emptyList(); + replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getRegionLoadsCount(); i++) { + if (!getRegionLoads(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getCoprocessorsCount(); i++) { + if (!getCoprocessors(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getReplLoadSourceCount(); i++) { + if (!getReplLoadSource(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasReplLoadSink()) { + if (!getReplLoadSink().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, numberOfRequests_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, totalNumberOfRequests_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt32(3, usedHeapMB_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt32(4, maxHeapMB_); + } + for (int i = 0; i < regionLoads_.size(); i++) { + output.writeMessage(5, regionLoads_.get(i)); + } + for (int i = 0; i < coprocessors_.size(); i++) { + output.writeMessage(6, coprocessors_.get(i)); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt64(7, reportStartTime_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeUInt64(8, reportEndTime_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeUInt32(9, infoServerPort_); + } + for (int i = 0; i < replLoadSource_.size(); i++) { + output.writeMessage(10, replLoadSource_.get(i)); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeMessage(11, replLoadSink_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, numberOfRequests_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, totalNumberOfRequests_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(3, usedHeapMB_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(4, maxHeapMB_); + } + for (int i = 0; i < regionLoads_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + 
.computeMessageSize(5, regionLoads_.get(i)); + } + for (int i = 0; i < coprocessors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, coprocessors_.get(i)); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(7, reportStartTime_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(8, reportEndTime_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(9, infoServerPort_); + } + for (int i = 0; i < replLoadSource_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(10, replLoadSource_.get(i)); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(11, replLoadSink_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) obj; + + boolean result = true; + result = result && (hasNumberOfRequests() == other.hasNumberOfRequests()); + if (hasNumberOfRequests()) { + result = result && (getNumberOfRequests() + == other.getNumberOfRequests()); + } + result = result && (hasTotalNumberOfRequests() == other.hasTotalNumberOfRequests()); + if (hasTotalNumberOfRequests()) { + result = result && (getTotalNumberOfRequests() + == other.getTotalNumberOfRequests()); + } + result = result && (hasUsedHeapMB() == other.hasUsedHeapMB()); + if (hasUsedHeapMB()) { + result = result && (getUsedHeapMB() + == other.getUsedHeapMB()); + } + result = result && (hasMaxHeapMB() == other.hasMaxHeapMB()); + if (hasMaxHeapMB()) { + result = result && (getMaxHeapMB() + == other.getMaxHeapMB()); + } + result = result && getRegionLoadsList() + .equals(other.getRegionLoadsList()); + result = result && getCoprocessorsList() + .equals(other.getCoprocessorsList()); + result = result && (hasReportStartTime() == other.hasReportStartTime()); + if (hasReportStartTime()) { + result = result && (getReportStartTime() + == other.getReportStartTime()); + } + result = result && (hasReportEndTime() == other.hasReportEndTime()); + if (hasReportEndTime()) { + result = result && (getReportEndTime() + == other.getReportEndTime()); + } + result = result && (hasInfoServerPort() == other.hasInfoServerPort()); + if (hasInfoServerPort()) { + result = result && (getInfoServerPort() + == other.getInfoServerPort()); + } + result = result && getReplLoadSourceList() + .equals(other.getReplLoadSourceList()); + result = result && (hasReplLoadSink() == other.hasReplLoadSink()); + if (hasReplLoadSink()) { + result = result && getReplLoadSink() + .equals(other.getReplLoadSink()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + 
if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasNumberOfRequests()) { + hash = (37 * hash) + NUMBER_OF_REQUESTS_FIELD_NUMBER; + hash = (53 * hash) + getNumberOfRequests(); + } + if (hasTotalNumberOfRequests()) { + hash = (37 * hash) + TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER; + hash = (53 * hash) + getTotalNumberOfRequests(); + } + if (hasUsedHeapMB()) { + hash = (37 * hash) + USED_HEAP_MB_FIELD_NUMBER; + hash = (53 * hash) + getUsedHeapMB(); + } + if (hasMaxHeapMB()) { + hash = (37 * hash) + MAX_HEAP_MB_FIELD_NUMBER; + hash = (53 * hash) + getMaxHeapMB(); + } + if (getRegionLoadsCount() > 0) { + hash = (37 * hash) + REGION_LOADS_FIELD_NUMBER; + hash = (53 * hash) + getRegionLoadsList().hashCode(); + } + if (getCoprocessorsCount() > 0) { + hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER; + hash = (53 * hash) + getCoprocessorsList().hashCode(); + } + if (hasReportStartTime()) { + hash = (37 * hash) + REPORT_START_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getReportStartTime()); + } + if (hasReportEndTime()) { + hash = (37 * hash) + REPORT_END_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getReportEndTime()); + } + if (hasInfoServerPort()) { + hash = (37 * hash) + INFO_SERVER_PORT_FIELD_NUMBER; + hash = (53 * hash) + getInfoServerPort(); + } + if (getReplLoadSourceCount() > 0) { + hash = (37 * hash) + REPLLOADSOURCE_FIELD_NUMBER; + hash = (53 * hash) + getReplLoadSourceList().hashCode(); + } + if (hasReplLoadSink()) { + hash = (37 * hash) + REPLLOADSINK_FIELD_NUMBER; + hash = (53 * hash) + getReplLoadSink().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ServerLoad} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionLoadsFieldBuilder(); + getCoprocessorsFieldBuilder(); + getReplLoadSourceFieldBuilder(); + getReplLoadSinkFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + numberOfRequests_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + totalNumberOfRequests_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + usedHeapMB_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + maxHeapMB_ = 0; + bitField0_ = (bitField0_ & ~0x00000008); + if (regionLoadsBuilder_ == null) { + regionLoads_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + } else { + regionLoadsBuilder_.clear(); + } + if (coprocessorsBuilder_ == null) { + coprocessors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + } else { + coprocessorsBuilder_.clear(); + } + reportStartTime_ = 0L; + 
bitField0_ = (bitField0_ & ~0x00000040); + reportEndTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000080); + infoServerPort_ = 0; + bitField0_ = (bitField0_ & ~0x00000100); + if (replLoadSourceBuilder_ == null) { + replLoadSource_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + } else { + replLoadSourceBuilder_.clear(); + } + if (replLoadSinkBuilder_ == null) { + replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); + } else { + replLoadSinkBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000400); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.numberOfRequests_ = numberOfRequests_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.totalNumberOfRequests_ = totalNumberOfRequests_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.usedHeapMB_ = usedHeapMB_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.maxHeapMB_ = maxHeapMB_; + if (regionLoadsBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { + regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.regionLoads_ = regionLoads_; + } else { + result.regionLoads_ = regionLoadsBuilder_.build(); + } + if (coprocessorsBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { + coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.coprocessors_ = coprocessors_; + } else { + result.coprocessors_ = coprocessorsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000010; + } + result.reportStartTime_ = reportStartTime_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000020; + } + result.reportEndTime_ = reportEndTime_; + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000040; + } + result.infoServerPort_ = infoServerPort_; + if (replLoadSourceBuilder_ == null) { + if (((bitField0_ & 0x00000200) == 0x00000200)) { + replLoadSource_ = java.util.Collections.unmodifiableList(replLoadSource_); + bitField0_ = (bitField0_ & ~0x00000200); + 
} + result.replLoadSource_ = replLoadSource_; + } else { + result.replLoadSource_ = replLoadSourceBuilder_.build(); + } + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000080; + } + if (replLoadSinkBuilder_ == null) { + result.replLoadSink_ = replLoadSink_; + } else { + result.replLoadSink_ = replLoadSinkBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) return this; + if (other.hasNumberOfRequests()) { + setNumberOfRequests(other.getNumberOfRequests()); + } + if (other.hasTotalNumberOfRequests()) { + setTotalNumberOfRequests(other.getTotalNumberOfRequests()); + } + if (other.hasUsedHeapMB()) { + setUsedHeapMB(other.getUsedHeapMB()); + } + if (other.hasMaxHeapMB()) { + setMaxHeapMB(other.getMaxHeapMB()); + } + if (regionLoadsBuilder_ == null) { + if (!other.regionLoads_.isEmpty()) { + if (regionLoads_.isEmpty()) { + regionLoads_ = other.regionLoads_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureRegionLoadsIsMutable(); + regionLoads_.addAll(other.regionLoads_); + } + onChanged(); + } + } else { + if (!other.regionLoads_.isEmpty()) { + if (regionLoadsBuilder_.isEmpty()) { + regionLoadsBuilder_.dispose(); + regionLoadsBuilder_ = null; + regionLoads_ = other.regionLoads_; + bitField0_ = (bitField0_ & ~0x00000010); + regionLoadsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getRegionLoadsFieldBuilder() : null; + } else { + regionLoadsBuilder_.addAllMessages(other.regionLoads_); + } + } + } + if (coprocessorsBuilder_ == null) { + if (!other.coprocessors_.isEmpty()) { + if (coprocessors_.isEmpty()) { + coprocessors_ = other.coprocessors_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureCoprocessorsIsMutable(); + coprocessors_.addAll(other.coprocessors_); + } + onChanged(); + } + } else { + if (!other.coprocessors_.isEmpty()) { + if (coprocessorsBuilder_.isEmpty()) { + coprocessorsBuilder_.dispose(); + coprocessorsBuilder_ = null; + coprocessors_ = other.coprocessors_; + bitField0_ = (bitField0_ & ~0x00000020); + coprocessorsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getCoprocessorsFieldBuilder() : null; + } else { + coprocessorsBuilder_.addAllMessages(other.coprocessors_); + } + } + } + if (other.hasReportStartTime()) { + setReportStartTime(other.getReportStartTime()); + } + if (other.hasReportEndTime()) { + setReportEndTime(other.getReportEndTime()); + } + if (other.hasInfoServerPort()) { + setInfoServerPort(other.getInfoServerPort()); + } + if (replLoadSourceBuilder_ == null) { + if (!other.replLoadSource_.isEmpty()) { + if (replLoadSource_.isEmpty()) { + replLoadSource_ = other.replLoadSource_; + bitField0_ = (bitField0_ & ~0x00000200); + } else { + ensureReplLoadSourceIsMutable(); + replLoadSource_.addAll(other.replLoadSource_); + } + onChanged(); + } + } else { + if (!other.replLoadSource_.isEmpty()) { + if (replLoadSourceBuilder_.isEmpty()) { + replLoadSourceBuilder_.dispose(); + replLoadSourceBuilder_ = null; + replLoadSource_ = other.replLoadSource_; + bitField0_ = (bitField0_ & ~0x00000200); + replLoadSourceBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getReplLoadSourceFieldBuilder() : null; + } else { + replLoadSourceBuilder_.addAllMessages(other.replLoadSource_); + } + } + } + if (other.hasReplLoadSink()) { + mergeReplLoadSink(other.getReplLoadSink()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getRegionLoadsCount(); i++) { + if (!getRegionLoads(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getCoprocessorsCount(); i++) { + if (!getCoprocessors(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getReplLoadSourceCount(); i++) { + if (!getReplLoadSource(i).isInitialized()) { + + return false; + } + } + if (hasReplLoadSink()) { + if (!getReplLoadSink().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional uint32 number_of_requests = 1; + private int numberOfRequests_ ; + /** + * optional uint32 number_of_requests = 1; + * + *
+       ** Number of requests since last report. 
+       * 
+ */ + public boolean hasNumberOfRequests() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint32 number_of_requests = 1; + * + *
+       ** Number of requests since last report. 
+       * 
+ */ + public int getNumberOfRequests() { + return numberOfRequests_; + } + /** + * optional uint32 number_of_requests = 1; + * + *
+       ** Number of requests since last report. 
+       * 
+ */ + public Builder setNumberOfRequests(int value) { + bitField0_ |= 0x00000001; + numberOfRequests_ = value; + onChanged(); + return this; + } + /** + * optional uint32 number_of_requests = 1; + * + *
+       ** Number of requests since last report. 
+       * 
+ */ + public Builder clearNumberOfRequests() { + bitField0_ = (bitField0_ & ~0x00000001); + numberOfRequests_ = 0; + onChanged(); + return this; + } + + // optional uint32 total_number_of_requests = 2; + private int totalNumberOfRequests_ ; + /** + * optional uint32 total_number_of_requests = 2; + * + *
+       ** Total Number of requests from the start of the region server. 
+       * 
+ */ + public boolean hasTotalNumberOfRequests() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint32 total_number_of_requests = 2; + * + *
+       ** Total Number of requests from the start of the region server. 
+       * 
+ */ + public int getTotalNumberOfRequests() { + return totalNumberOfRequests_; + } + /** + * optional uint32 total_number_of_requests = 2; + * + *
+       ** Total Number of requests from the start of the region server. 
+       * 
+ */ + public Builder setTotalNumberOfRequests(int value) { + bitField0_ |= 0x00000002; + totalNumberOfRequests_ = value; + onChanged(); + return this; + } + /** + * optional uint32 total_number_of_requests = 2; + * + *
+       ** Total Number of requests from the start of the region server. 
+       * 
+ */ + public Builder clearTotalNumberOfRequests() { + bitField0_ = (bitField0_ & ~0x00000002); + totalNumberOfRequests_ = 0; + onChanged(); + return this; + } + + // optional uint32 used_heap_MB = 3; + private int usedHeapMB_ ; + /** + * optional uint32 used_heap_MB = 3; + * + *
+       ** the amount of used heap, in MB. 
+       * 
+ */ + public boolean hasUsedHeapMB() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint32 used_heap_MB = 3; + * + *
+       ** the amount of used heap, in MB. 
+       * 
+ */ + public int getUsedHeapMB() { + return usedHeapMB_; + } + /** + * optional uint32 used_heap_MB = 3; + * + *
+       ** the amount of used heap, in MB. 
+       * 
+ */ + public Builder setUsedHeapMB(int value) { + bitField0_ |= 0x00000004; + usedHeapMB_ = value; + onChanged(); + return this; + } + /** + * optional uint32 used_heap_MB = 3; + * + *
+       ** the amount of used heap, in MB. 
+       * 
+ */ + public Builder clearUsedHeapMB() { + bitField0_ = (bitField0_ & ~0x00000004); + usedHeapMB_ = 0; + onChanged(); + return this; + } + + // optional uint32 max_heap_MB = 4; + private int maxHeapMB_ ; + /** + * optional uint32 max_heap_MB = 4; + * + *
+       ** the maximum allowable size of the heap, in MB. 
+       * 
+ */ + public boolean hasMaxHeapMB() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional uint32 max_heap_MB = 4; + * + *
+       ** the maximum allowable size of the heap, in MB. 
+       * 
+ */ + public int getMaxHeapMB() { + return maxHeapMB_; + } + /** + * optional uint32 max_heap_MB = 4; + * + *
+       ** the maximum allowable size of the heap, in MB. 
+       * 
+ */ + public Builder setMaxHeapMB(int value) { + bitField0_ |= 0x00000008; + maxHeapMB_ = value; + onChanged(); + return this; + } + /** + * optional uint32 max_heap_MB = 4; + * + *
+       ** the maximum allowable size of the heap, in MB. 
+       * 
+ */ + public Builder clearMaxHeapMB() { + bitField0_ = (bitField0_ & ~0x00000008); + maxHeapMB_ = 0; + onChanged(); + return this; + } + + // repeated .RegionLoad region_loads = 5; + private java.util.List regionLoads_ = + java.util.Collections.emptyList(); + private void ensureRegionLoadsIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + regionLoads_ = new java.util.ArrayList(regionLoads_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> regionLoadsBuilder_; + + /** + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public java.util.List getRegionLoadsList() { + if (regionLoadsBuilder_ == null) { + return java.util.Collections.unmodifiableList(regionLoads_); + } else { + return regionLoadsBuilder_.getMessageList(); + } + } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public int getRegionLoadsCount() { + if (regionLoadsBuilder_ == null) { + return regionLoads_.size(); + } else { + return regionLoadsBuilder_.getCount(); + } + } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) { + if (regionLoadsBuilder_ == null) { + return regionLoads_.get(index); + } else { + return regionLoadsBuilder_.getMessage(index); + } + } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public Builder setRegionLoads( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) { + if (regionLoadsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionLoadsIsMutable(); + regionLoads_.set(index, value); + onChanged(); + } else { + regionLoadsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public Builder setRegionLoads( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + regionLoads_.set(index, builderForValue.build()); + onChanged(); + } else { + regionLoadsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public Builder addRegionLoads(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) { if (regionLoadsBuilder_ == null) { - if (((bitField0_ & 0x00000010) == 0x00000010)) { - regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_); - bitField0_ = (bitField0_ & ~0x00000010); + if (value == null) { + throw new NullPointerException(); } - result.regionLoads_ = regionLoads_; + ensureRegionLoadsIsMutable(); + regionLoads_.add(value); + onChanged(); } else { - result.regionLoads_ = regionLoadsBuilder_.build(); + regionLoadsBuilder_.addMessage(value); } - if (coprocessorsBuilder_ == null) { - if (((bitField0_ & 0x00000020) == 0x00000020)) { - coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_); - bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public Builder addRegionLoads( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) { + if (regionLoadsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } - result.coprocessors_ = coprocessors_; + ensureRegionLoadsIsMutable(); + regionLoads_.add(index, value); + onChanged(); } else { - result.coprocessors_ = coprocessorsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000010; - } - result.reportStartTime_ = reportStartTime_; - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000020; - } - result.reportEndTime_ = reportEndTime_; - if (((from_bitField0_ & 0x00000100) == 0x00000100)) { - to_bitField0_ |= 0x00000040; + regionLoadsBuilder_.addMessage(index, value); } - result.infoServerPort_ = infoServerPort_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; + return this; } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad)other); + /** + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public Builder addRegionLoads( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + regionLoads_.add(builderForValue.build()); + onChanged(); } else { - super.mergeFrom(other); - return this; + regionLoadsBuilder_.addMessage(builderForValue.build()); } + return this; } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) return this; - if (other.hasNumberOfRequests()) { - setNumberOfRequests(other.getNumberOfRequests()); - } - if (other.hasTotalNumberOfRequests()) { - setTotalNumberOfRequests(other.getTotalNumberOfRequests()); - } - if (other.hasUsedHeapMB()) { - setUsedHeapMB(other.getUsedHeapMB()); - } - if (other.hasMaxHeapMB()) { - setMaxHeapMB(other.getMaxHeapMB()); - } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public Builder addRegionLoads( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) { if (regionLoadsBuilder_ == null) { - if (!other.regionLoads_.isEmpty()) { - if (regionLoads_.isEmpty()) { - regionLoads_ = other.regionLoads_; - bitField0_ = (bitField0_ & ~0x00000010); - } else { - ensureRegionLoadsIsMutable(); - regionLoads_.addAll(other.regionLoads_); - } - onChanged(); - } + ensureRegionLoadsIsMutable(); + regionLoads_.add(index, builderForValue.build()); + onChanged(); } else { - if (!other.regionLoads_.isEmpty()) { - if (regionLoadsBuilder_.isEmpty()) { - regionLoadsBuilder_.dispose(); - regionLoadsBuilder_ = null; - regionLoads_ = other.regionLoads_; - bitField0_ = (bitField0_ & ~0x00000010); - regionLoadsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getRegionLoadsFieldBuilder() : null; - } else { - regionLoadsBuilder_.addAllMessages(other.regionLoads_); - } - } + regionLoadsBuilder_.addMessage(index, builderForValue.build()); } - if (coprocessorsBuilder_ == null) { - if (!other.coprocessors_.isEmpty()) { - if (coprocessors_.isEmpty()) { - coprocessors_ = other.coprocessors_; - bitField0_ = (bitField0_ & ~0x00000020); - } else { - ensureCoprocessorsIsMutable(); - coprocessors_.addAll(other.coprocessors_); - } - onChanged(); - } + return this; + } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public Builder addAllRegionLoads( + java.lang.Iterable values) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + super.addAll(values, regionLoads_); + onChanged(); } else { - if (!other.coprocessors_.isEmpty()) { - if (coprocessorsBuilder_.isEmpty()) { - coprocessorsBuilder_.dispose(); - coprocessorsBuilder_ = null; - coprocessors_ = other.coprocessors_; - bitField0_ = (bitField0_ & ~0x00000020); - coprocessorsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getCoprocessorsFieldBuilder() : null; - } else { - coprocessorsBuilder_.addAllMessages(other.coprocessors_); - } - } - } - if (other.hasReportStartTime()) { - setReportStartTime(other.getReportStartTime()); - } - if (other.hasReportEndTime()) { - setReportEndTime(other.getReportEndTime()); - } - if (other.hasInfoServerPort()) { - setInfoServerPort(other.getInfoServerPort()); + regionLoadsBuilder_.addAllMessages(values); } - this.mergeUnknownFields(other.getUnknownFields()); return this; } - - public final boolean isInitialized() { - for (int i = 0; i < getRegionLoadsCount(); i++) { - if (!getRegionLoads(i).isInitialized()) { - - return false; - } - } - for (int i = 0; i < getCoprocessorsCount(); i++) { - if (!getCoprocessors(i).isInitialized()) { - - return false; - } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public Builder clearRegionLoads() { + if (regionLoadsBuilder_ == null) { + regionLoads_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + regionLoadsBuilder_.clear(); } - return true; + return this; } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } + /** + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public Builder removeRegionLoads(int index) { + if (regionLoadsBuilder_ == null) { + ensureRegionLoadsIsMutable(); + regionLoads_.remove(index); + onChanged(); + } else { + regionLoadsBuilder_.remove(index); } return this; } - private int bitField0_; - - // optional uint32 number_of_requests = 1; - private int numberOfRequests_ ; /** - * optional uint32 number_of_requests = 1; + * repeated .RegionLoad region_loads = 5; + * + *
+       ** Information on the load of individual regions. 
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder getRegionLoadsBuilder( + int index) { + return getRegionLoadsFieldBuilder().getBuilder(index); + } + /** + * repeated .RegionLoad region_loads = 5; * *
-       ** Number of requests since last report. 
+       ** Information on the load of individual regions. 
        * 
*/ - public boolean hasNumberOfRequests() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( + int index) { + if (regionLoadsBuilder_ == null) { + return regionLoads_.get(index); } else { + return regionLoadsBuilder_.getMessageOrBuilder(index); + } } /** - * optional uint32 number_of_requests = 1; + * repeated .RegionLoad region_loads = 5; * *
-       ** Number of requests since last report. 
+       ** Information on the load of individual regions. 
        * 
*/ - public int getNumberOfRequests() { - return numberOfRequests_; + public java.util.List + getRegionLoadsOrBuilderList() { + if (regionLoadsBuilder_ != null) { + return regionLoadsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(regionLoads_); + } } /** - * optional uint32 number_of_requests = 1; + * repeated .RegionLoad region_loads = 5; * *
-       ** Number of requests since last report. 
+       ** Information on the load of individual regions. 
        * 
*/ - public Builder setNumberOfRequests(int value) { - bitField0_ |= 0x00000001; - numberOfRequests_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder addRegionLoadsBuilder() { + return getRegionLoadsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()); } /** - * optional uint32 number_of_requests = 1; + * repeated .RegionLoad region_loads = 5; * *
-       ** Number of requests since last report. 
+       ** Information on the load of individual regions. 
        * 
*/ - public Builder clearNumberOfRequests() { - bitField0_ = (bitField0_ & ~0x00000001); - numberOfRequests_ = 0; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder addRegionLoadsBuilder( + int index) { + return getRegionLoadsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()); } - - // optional uint32 total_number_of_requests = 2; - private int totalNumberOfRequests_ ; /** - * optional uint32 total_number_of_requests = 2; + * repeated .RegionLoad region_loads = 5; * *
-       ** Total Number of requests from the start of the region server. 
+       ** Information on the load of individual regions. 
        * 
*/ - public boolean hasTotalNumberOfRequests() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public java.util.List + getRegionLoadsBuilderList() { + return getRegionLoadsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> + getRegionLoadsFieldBuilder() { + if (regionLoadsBuilder_ == null) { + regionLoadsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>( + regionLoads_, + ((bitField0_ & 0x00000010) == 0x00000010), + getParentForChildren(), + isClean()); + regionLoads_ = null; + } + return regionLoadsBuilder_; + } + + // repeated .Coprocessor coprocessors = 6; + private java.util.List coprocessors_ = + java.util.Collections.emptyList(); + private void ensureCoprocessorsIsMutable() { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { + coprocessors_ = new java.util.ArrayList(coprocessors_); + bitField0_ |= 0x00000020; + } } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> coprocessorsBuilder_; + /** - * optional uint32 total_number_of_requests = 2; + * repeated .Coprocessor coprocessors = 6; * *
-       ** Total Number of requests from the start of the region server. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public int getTotalNumberOfRequests() { - return totalNumberOfRequests_; + public java.util.List getCoprocessorsList() { + if (coprocessorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(coprocessors_); + } else { + return coprocessorsBuilder_.getMessageList(); + } } /** - * optional uint32 total_number_of_requests = 2; + * repeated .Coprocessor coprocessors = 6; * *
-       ** Total Number of requests from the start of the region server. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public Builder setTotalNumberOfRequests(int value) { - bitField0_ |= 0x00000002; - totalNumberOfRequests_ = value; - onChanged(); - return this; + public int getCoprocessorsCount() { + if (coprocessorsBuilder_ == null) { + return coprocessors_.size(); + } else { + return coprocessorsBuilder_.getCount(); + } } /** - * optional uint32 total_number_of_requests = 2; + * repeated .Coprocessor coprocessors = 6; * *
-       ** Total Number of requests from the start of the region server. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public Builder clearTotalNumberOfRequests() { - bitField0_ = (bitField0_ & ~0x00000002); - totalNumberOfRequests_ = 0; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) { + if (coprocessorsBuilder_ == null) { + return coprocessors_.get(index); + } else { + return coprocessorsBuilder_.getMessage(index); + } } - - // optional uint32 used_heap_MB = 3; - private int usedHeapMB_ ; /** - * optional uint32 used_heap_MB = 3; + * repeated .Coprocessor coprocessors = 6; * *
-       ** the amount of used heap, in MB. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public boolean hasUsedHeapMB() { - return ((bitField0_ & 0x00000004) == 0x00000004); + public Builder setCoprocessors( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) { + if (coprocessorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCoprocessorsIsMutable(); + coprocessors_.set(index, value); + onChanged(); + } else { + coprocessorsBuilder_.setMessage(index, value); + } + return this; } /** - * optional uint32 used_heap_MB = 3; + * repeated .Coprocessor coprocessors = 6; * *
-       ** the amount of used heap, in MB. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public int getUsedHeapMB() { - return usedHeapMB_; + public Builder setCoprocessors( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + coprocessors_.set(index, builderForValue.build()); + onChanged(); + } else { + coprocessorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; } /** - * optional uint32 used_heap_MB = 3; + * repeated .Coprocessor coprocessors = 6; * *
-       ** the amount of used heap, in MB. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public Builder setUsedHeapMB(int value) { - bitField0_ |= 0x00000004; - usedHeapMB_ = value; - onChanged(); + public Builder addCoprocessors(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) { + if (coprocessorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCoprocessorsIsMutable(); + coprocessors_.add(value); + onChanged(); + } else { + coprocessorsBuilder_.addMessage(value); + } return this; } /** - * optional uint32 used_heap_MB = 3; + * repeated .Coprocessor coprocessors = 6; * *
-       ** the amount of used heap, in MB. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public Builder clearUsedHeapMB() { - bitField0_ = (bitField0_ & ~0x00000004); - usedHeapMB_ = 0; - onChanged(); + public Builder addCoprocessors( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) { + if (coprocessorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCoprocessorsIsMutable(); + coprocessors_.add(index, value); + onChanged(); + } else { + coprocessorsBuilder_.addMessage(index, value); + } return this; } - - // optional uint32 max_heap_MB = 4; - private int maxHeapMB_ ; /** - * optional uint32 max_heap_MB = 4; + * repeated .Coprocessor coprocessors = 6; * *
-       ** the maximum allowable size of the heap, in MB. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public boolean hasMaxHeapMB() { - return ((bitField0_ & 0x00000008) == 0x00000008); + public Builder addCoprocessors( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + coprocessors_.add(builderForValue.build()); + onChanged(); + } else { + coprocessorsBuilder_.addMessage(builderForValue.build()); + } + return this; } /** - * optional uint32 max_heap_MB = 4; + * repeated .Coprocessor coprocessors = 6; * *
-       ** the maximum allowable size of the heap, in MB. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public int getMaxHeapMB() { - return maxHeapMB_; + public Builder addCoprocessors( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + coprocessors_.add(index, builderForValue.build()); + onChanged(); + } else { + coprocessorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; } /** - * optional uint32 max_heap_MB = 4; + * repeated .Coprocessor coprocessors = 6; * *
-       ** the maximum allowable size of the heap, in MB. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public Builder setMaxHeapMB(int value) { - bitField0_ |= 0x00000008; - maxHeapMB_ = value; - onChanged(); + public Builder addAllCoprocessors( + java.lang.Iterable values) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + super.addAll(values, coprocessors_); + onChanged(); + } else { + coprocessorsBuilder_.addAllMessages(values); + } return this; } /** - * optional uint32 max_heap_MB = 4; + * repeated .Coprocessor coprocessors = 6; * *
-       ** the maximum allowable size of the heap, in MB. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public Builder clearMaxHeapMB() { - bitField0_ = (bitField0_ & ~0x00000008); - maxHeapMB_ = 0; - onChanged(); + public Builder clearCoprocessors() { + if (coprocessorsBuilder_ == null) { + coprocessors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + } else { + coprocessorsBuilder_.clear(); + } return this; } - - // repeated .RegionLoad region_loads = 5; - private java.util.List regionLoads_ = - java.util.Collections.emptyList(); - private void ensureRegionLoadsIsMutable() { - if (!((bitField0_ & 0x00000010) == 0x00000010)) { - regionLoads_ = new java.util.ArrayList(regionLoads_); - bitField0_ |= 0x00000010; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> regionLoadsBuilder_; - /** - * repeated .RegionLoad region_loads = 5; + * repeated .Coprocessor coprocessors = 6; * *
-       ** Information on the load of individual regions. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public java.util.List getRegionLoadsList() { - if (regionLoadsBuilder_ == null) { - return java.util.Collections.unmodifiableList(regionLoads_); + public Builder removeCoprocessors(int index) { + if (coprocessorsBuilder_ == null) { + ensureCoprocessorsIsMutable(); + coprocessors_.remove(index); + onChanged(); } else { - return regionLoadsBuilder_.getMessageList(); + coprocessorsBuilder_.remove(index); } + return this; } /** - * repeated .RegionLoad region_loads = 5; + * repeated .Coprocessor coprocessors = 6; * *
-       ** Information on the load of individual regions. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public int getRegionLoadsCount() { - if (regionLoadsBuilder_ == null) { - return regionLoads_.size(); - } else { - return regionLoadsBuilder_.getCount(); - } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder getCoprocessorsBuilder( + int index) { + return getCoprocessorsFieldBuilder().getBuilder(index); } /** - * repeated .RegionLoad region_loads = 5; + * repeated .Coprocessor coprocessors = 6; * *
-       ** Information on the load of individual regions. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) { - if (regionLoadsBuilder_ == null) { - return regionLoads_.get(index); - } else { - return regionLoadsBuilder_.getMessage(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( + int index) { + if (coprocessorsBuilder_ == null) { + return coprocessors_.get(index); } else { + return coprocessorsBuilder_.getMessageOrBuilder(index); } } /** - * repeated .RegionLoad region_loads = 5; + * repeated .Coprocessor coprocessors = 6; * *
-       ** Information on the load of individual regions. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public Builder setRegionLoads( - int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) { - if (regionLoadsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRegionLoadsIsMutable(); - regionLoads_.set(index, value); - onChanged(); + public java.util.List + getCoprocessorsOrBuilderList() { + if (coprocessorsBuilder_ != null) { + return coprocessorsBuilder_.getMessageOrBuilderList(); } else { - regionLoadsBuilder_.setMessage(index, value); + return java.util.Collections.unmodifiableList(coprocessors_); } - return this; } /** - * repeated .RegionLoad region_loads = 5; + * repeated .Coprocessor coprocessors = 6; * *
-       ** Information on the load of individual regions. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public Builder setRegionLoads( - int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) { - if (regionLoadsBuilder_ == null) { - ensureRegionLoadsIsMutable(); - regionLoads_.set(index, builderForValue.build()); - onChanged(); - } else { - regionLoadsBuilder_.setMessage(index, builderForValue.build()); - } - return this; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addCoprocessorsBuilder() { + return getCoprocessorsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()); } /** - * repeated .RegionLoad region_loads = 5; + * repeated .Coprocessor coprocessors = 6; * *
-       ** Information on the load of individual regions. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public Builder addRegionLoads(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) { - if (regionLoadsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRegionLoadsIsMutable(); - regionLoads_.add(value); - onChanged(); - } else { - regionLoadsBuilder_.addMessage(value); - } - return this; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addCoprocessorsBuilder( + int index) { + return getCoprocessorsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()); } /** - * repeated .RegionLoad region_loads = 5; + * repeated .Coprocessor coprocessors = 6; * *
-       ** Information on the load of individual regions. 
+       **
+       * Regionserver-level coprocessors, e.g., WALObserver implementations.
+       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+       * objects.
        * 
*/ - public Builder addRegionLoads( - int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) { - if (regionLoadsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRegionLoadsIsMutable(); - regionLoads_.add(index, value); - onChanged(); - } else { - regionLoadsBuilder_.addMessage(index, value); + public java.util.List + getCoprocessorsBuilderList() { + return getCoprocessorsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> + getCoprocessorsFieldBuilder() { + if (coprocessorsBuilder_ == null) { + coprocessorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>( + coprocessors_, + ((bitField0_ & 0x00000020) == 0x00000020), + getParentForChildren(), + isClean()); + coprocessors_ = null; } - return this; + return coprocessorsBuilder_; } + + // optional uint64 report_start_time = 7; + private long reportStartTime_ ; /** - * repeated .RegionLoad region_loads = 5; + * optional uint64 report_start_time = 7; * *
-       ** Information on the load of individual regions. 
+       **
+       * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests).
+       * Time is measured as the difference, in milliseconds, between the current time
+       * and midnight, January 1, 1970 UTC.
        * 
*/ - public Builder addRegionLoads( - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) { - if (regionLoadsBuilder_ == null) { - ensureRegionLoadsIsMutable(); - regionLoads_.add(builderForValue.build()); - onChanged(); - } else { - regionLoadsBuilder_.addMessage(builderForValue.build()); - } - return this; + public boolean hasReportStartTime() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional uint64 report_start_time = 7; + * + *
+       **
+       * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests).
+       * Time is measured as the difference, in milliseconds, between the current time
+       * and midnight, January 1, 1970 UTC.
+       * 
+ */ + public long getReportStartTime() { + return reportStartTime_; } /** - * repeated .RegionLoad region_loads = 5; + * optional uint64 report_start_time = 7; * *
-       ** Information on the load of individual regions. 
+       **
+       * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests).
+       * Time is measured as the difference, in milliseconds, between the current time
+       * and midnight, January 1, 1970 UTC.
        * 
*/ - public Builder addRegionLoads( - int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) { - if (regionLoadsBuilder_ == null) { - ensureRegionLoadsIsMutable(); - regionLoads_.add(index, builderForValue.build()); - onChanged(); - } else { - regionLoadsBuilder_.addMessage(index, builderForValue.build()); - } + public Builder setReportStartTime(long value) { + bitField0_ |= 0x00000040; + reportStartTime_ = value; + onChanged(); return this; } /** - * repeated .RegionLoad region_loads = 5; + * optional uint64 report_start_time = 7; * *
-       ** Information on the load of individual regions. 
+       **
+       * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests).
+       * Time is measured as the difference, in milliseconds, between the current time
+       * and midnight, January 1, 1970 UTC.
        * 
*/ - public Builder addAllRegionLoads( - java.lang.Iterable values) { - if (regionLoadsBuilder_ == null) { - ensureRegionLoadsIsMutable(); - super.addAll(values, regionLoads_); - onChanged(); - } else { - regionLoadsBuilder_.addAllMessages(values); - } + public Builder clearReportStartTime() { + bitField0_ = (bitField0_ & ~0x00000040); + reportStartTime_ = 0L; + onChanged(); return this; } + + // optional uint64 report_end_time = 8; + private long reportEndTime_ ; /** - * repeated .RegionLoad region_loads = 5; + * optional uint64 report_end_time = 8; * *
-       ** Information on the load of individual regions. 
+       **
+       * Time when report was generated.
+       * Time is measured as the difference, in milliseconds, between the current time
+       * and midnight, January 1, 1970 UTC.
        * 
*/ - public Builder clearRegionLoads() { - if (regionLoadsBuilder_ == null) { - regionLoads_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000010); - onChanged(); - } else { - regionLoadsBuilder_.clear(); - } - return this; + public boolean hasReportEndTime() { + return ((bitField0_ & 0x00000080) == 0x00000080); } /** - * repeated .RegionLoad region_loads = 5; + * optional uint64 report_end_time = 8; * *
-       ** Information on the load of individual regions. 
+       **
+       * Time when report was generated.
+       * Time is measured as the difference, in milliseconds, between the current time
+       * and midnight, January 1, 1970 UTC.
        * 
*/ - public Builder removeRegionLoads(int index) { - if (regionLoadsBuilder_ == null) { - ensureRegionLoadsIsMutable(); - regionLoads_.remove(index); - onChanged(); - } else { - regionLoadsBuilder_.remove(index); - } - return this; + public long getReportEndTime() { + return reportEndTime_; } /** - * repeated .RegionLoad region_loads = 5; + * optional uint64 report_end_time = 8; * *
-       ** Information on the load of individual regions. 
+       **
+       * Time when report was generated.
+       * Time is measured as the difference, in milliseconds, between the current time
+       * and midnight, January 1, 1970 UTC.
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder getRegionLoadsBuilder( - int index) { - return getRegionLoadsFieldBuilder().getBuilder(index); + public Builder setReportEndTime(long value) { + bitField0_ |= 0x00000080; + reportEndTime_ = value; + onChanged(); + return this; } /** - * repeated .RegionLoad region_loads = 5; + * optional uint64 report_end_time = 8; * *
-       ** Information on the load of individual regions. 
+       **
+       * Time when report was generated.
+       * Time is measured as the difference, in milliseconds, between the current time
+       * and midnight, January 1, 1970 UTC.
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder( - int index) { - if (regionLoadsBuilder_ == null) { - return regionLoads_.get(index); } else { - return regionLoadsBuilder_.getMessageOrBuilder(index); - } + public Builder clearReportEndTime() { + bitField0_ = (bitField0_ & ~0x00000080); + reportEndTime_ = 0L; + onChanged(); + return this; } + + // optional uint32 info_server_port = 9; + private int infoServerPort_ ; /** - * repeated .RegionLoad region_loads = 5; + * optional uint32 info_server_port = 9; * *
-       ** Information on the load of individual regions. 
+       **
+       * The port number that this region server is hosting an info server on.
        * 
*/ - public java.util.List - getRegionLoadsOrBuilderList() { - if (regionLoadsBuilder_ != null) { - return regionLoadsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(regionLoads_); - } + public boolean hasInfoServerPort() { + return ((bitField0_ & 0x00000100) == 0x00000100); } /** - * repeated .RegionLoad region_loads = 5; + * optional uint32 info_server_port = 9; * *
-       ** Information on the load of individual regions. 
+       **
+       * The port number that this region server is hosting an info server on.
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder addRegionLoadsBuilder() { - return getRegionLoadsFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()); + public int getInfoServerPort() { + return infoServerPort_; } /** - * repeated .RegionLoad region_loads = 5; + * optional uint32 info_server_port = 9; * *
-       ** Information on the load of individual regions. 
+       **
+       * The port number that this region server is hosting an info server on.
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder addRegionLoadsBuilder( - int index) { - return getRegionLoadsFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()); + public Builder setInfoServerPort(int value) { + bitField0_ |= 0x00000100; + infoServerPort_ = value; + onChanged(); + return this; } /** - * repeated .RegionLoad region_loads = 5; + * optional uint32 info_server_port = 9; * *
-       ** Information on the load of individual regions. 
+       **
+       * The port number that this region server is hosting an info server on.
        * 
*/ - public java.util.List - getRegionLoadsBuilderList() { - return getRegionLoadsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> - getRegionLoadsFieldBuilder() { - if (regionLoadsBuilder_ == null) { - regionLoadsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>( - regionLoads_, - ((bitField0_ & 0x00000010) == 0x00000010), - getParentForChildren(), - isClean()); - regionLoads_ = null; - } - return regionLoadsBuilder_; + public Builder clearInfoServerPort() { + bitField0_ = (bitField0_ & ~0x00000100); + infoServerPort_ = 0; + onChanged(); + return this; } - // repeated .Coprocessor coprocessors = 6; - private java.util.List coprocessors_ = + // repeated .ReplicationLoadSource replLoadSource = 10; + private java.util.List replLoadSource_ = java.util.Collections.emptyList(); - private void ensureCoprocessorsIsMutable() { - if (!((bitField0_ & 0x00000020) == 0x00000020)) { - coprocessors_ = new java.util.ArrayList(coprocessors_); - bitField0_ |= 0x00000020; + private void ensureReplLoadSourceIsMutable() { + if (!((bitField0_ & 0x00000200) == 0x00000200)) { + replLoadSource_ = new java.util.ArrayList(replLoadSource_); + bitField0_ |= 0x00000200; } } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> coprocessorsBuilder_; + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder> replLoadSourceBuilder_; /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public java.util.List getCoprocessorsList() { - if (coprocessorsBuilder_ == null) { - return java.util.Collections.unmodifiableList(coprocessors_); + public java.util.List getReplLoadSourceList() { + if (replLoadSourceBuilder_ == null) { + return java.util.Collections.unmodifiableList(replLoadSource_); } else { - return coprocessorsBuilder_.getMessageList(); + return replLoadSourceBuilder_.getMessageList(); } } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public int getCoprocessorsCount() { - if (coprocessorsBuilder_ == null) { - return coprocessors_.size(); + public int getReplLoadSourceCount() { + if (replLoadSourceBuilder_ == null) { + return replLoadSource_.size(); } else { - return coprocessorsBuilder_.getCount(); + return replLoadSourceBuilder_.getCount(); } } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) { - if (coprocessorsBuilder_ == null) { - return coprocessors_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index) { + if (replLoadSourceBuilder_ == null) { + return replLoadSource_.get(index); } else { - return coprocessorsBuilder_.getMessage(index); + return replLoadSourceBuilder_.getMessage(index); } } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public Builder setCoprocessors( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) { - if (coprocessorsBuilder_ == null) { + public Builder setReplLoadSource( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) { + if (replLoadSourceBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureCoprocessorsIsMutable(); - coprocessors_.set(index, value); + ensureReplLoadSourceIsMutable(); + replLoadSource_.set(index, value); onChanged(); } else { - coprocessorsBuilder_.setMessage(index, value); + replLoadSourceBuilder_.setMessage(index, value); } return this; } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public Builder setCoprocessors( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) { - if (coprocessorsBuilder_ == null) { - ensureCoprocessorsIsMutable(); - coprocessors_.set(index, builderForValue.build()); + public Builder setReplLoadSource( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + replLoadSource_.set(index, builderForValue.build()); onChanged(); } else { - coprocessorsBuilder_.setMessage(index, builderForValue.build()); + replLoadSourceBuilder_.setMessage(index, builderForValue.build()); } return this; } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public Builder addCoprocessors(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) { - if (coprocessorsBuilder_ == null) { + public Builder addReplLoadSource(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) { + if (replLoadSourceBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureCoprocessorsIsMutable(); - coprocessors_.add(value); + ensureReplLoadSourceIsMutable(); + replLoadSource_.add(value); onChanged(); } else { - coprocessorsBuilder_.addMessage(value); + replLoadSourceBuilder_.addMessage(value); } return this; } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public Builder addCoprocessors( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) { - if (coprocessorsBuilder_ == null) { + public Builder addReplLoadSource( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) { + if (replLoadSourceBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureCoprocessorsIsMutable(); - coprocessors_.add(index, value); + ensureReplLoadSourceIsMutable(); + replLoadSource_.add(index, value); onChanged(); } else { - coprocessorsBuilder_.addMessage(index, value); + replLoadSourceBuilder_.addMessage(index, value); } return this; } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public Builder addCoprocessors( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) { - if (coprocessorsBuilder_ == null) { - ensureCoprocessorsIsMutable(); - coprocessors_.add(builderForValue.build()); + public Builder addReplLoadSource( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + replLoadSource_.add(builderForValue.build()); onChanged(); } else { - coprocessorsBuilder_.addMessage(builderForValue.build()); + replLoadSourceBuilder_.addMessage(builderForValue.build()); } return this; } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public Builder addCoprocessors( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) { - if (coprocessorsBuilder_ == null) { - ensureCoprocessorsIsMutable(); - coprocessors_.add(index, builderForValue.build()); + public Builder addReplLoadSource( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + replLoadSource_.add(index, builderForValue.build()); onChanged(); } else { - coprocessorsBuilder_.addMessage(index, builderForValue.build()); + replLoadSourceBuilder_.addMessage(index, builderForValue.build()); } return this; } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public Builder addAllCoprocessors( - java.lang.Iterable values) { - if (coprocessorsBuilder_ == null) { - ensureCoprocessorsIsMutable(); - super.addAll(values, coprocessors_); + public Builder addAllReplLoadSource( + java.lang.Iterable values) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + super.addAll(values, replLoadSource_); onChanged(); } else { - coprocessorsBuilder_.addAllMessages(values); + replLoadSourceBuilder_.addAllMessages(values); } return this; } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public Builder clearCoprocessors() { - if (coprocessorsBuilder_ == null) { - coprocessors_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000020); + public Builder clearReplLoadSource() { + if (replLoadSourceBuilder_ == null) { + replLoadSource_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); onChanged(); } else { - coprocessorsBuilder_.clear(); + replLoadSourceBuilder_.clear(); } return this; } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public Builder removeCoprocessors(int index) { - if (coprocessorsBuilder_ == null) { - ensureCoprocessorsIsMutable(); - coprocessors_.remove(index); + public Builder removeReplLoadSource(int index) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + replLoadSource_.remove(index); onChanged(); } else { - coprocessorsBuilder_.remove(index); + replLoadSourceBuilder_.remove(index); } return this; } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder getCoprocessorsBuilder( + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder getReplLoadSourceBuilder( int index) { - return getCoprocessorsFieldBuilder().getBuilder(index); + return getReplLoadSourceFieldBuilder().getBuilder(index); } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder( int index) { - if (coprocessorsBuilder_ == null) { - return coprocessors_.get(index); } else { - return coprocessorsBuilder_.getMessageOrBuilder(index); + if (replLoadSourceBuilder_ == null) { + return replLoadSource_.get(index); } else { + return replLoadSourceBuilder_.getMessageOrBuilder(index); } } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public java.util.List - getCoprocessorsOrBuilderList() { - if (coprocessorsBuilder_ != null) { - return coprocessorsBuilder_.getMessageOrBuilderList(); + public java.util.List + getReplLoadSourceOrBuilderList() { + if (replLoadSourceBuilder_ != null) { + return replLoadSourceBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(coprocessors_); + return java.util.Collections.unmodifiableList(replLoadSource_); } } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addCoprocessorsBuilder() { - return getCoprocessorsFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()); + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder addReplLoadSourceBuilder() { + return getReplLoadSourceFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()); } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addCoprocessorsBuilder( + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder addReplLoadSourceBuilder( int index) { - return getCoprocessorsFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()); + return getReplLoadSourceFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()); } /** - * repeated .Coprocessor coprocessors = 6; + * repeated .ReplicationLoadSource replLoadSource = 10; * *
        **
-       * Regionserver-level coprocessors, e.g., WALObserver implementations.
-       * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-       * objects.
+       * The replicationLoadSource for the replication source status of this region server.
        * 
*/ - public java.util.List - getCoprocessorsBuilderList() { - return getCoprocessorsFieldBuilder().getBuilderList(); + public java.util.List + getReplLoadSourceBuilderList() { + return getReplLoadSourceFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> - getCoprocessorsFieldBuilder() { - if (coprocessorsBuilder_ == null) { - coprocessorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>( - coprocessors_, - ((bitField0_ & 0x00000020) == 0x00000020), + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder> + getReplLoadSourceFieldBuilder() { + if (replLoadSourceBuilder_ == null) { + replLoadSourceBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder>( + replLoadSource_, + ((bitField0_ & 0x00000200) == 0x00000200), getParentForChildren(), isClean()); - coprocessors_ = null; + replLoadSource_ = null; } - return coprocessorsBuilder_; + return replLoadSourceBuilder_; } - // optional uint64 report_start_time = 7; - private long reportStartTime_ ; + // optional .ReplicationLoadSink replLoadSink = 11; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder> replLoadSinkBuilder_; /** - * optional uint64 report_start_time = 7; + * optional .ReplicationLoadSink replLoadSink = 11; * *
        **
-       * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-       * time is measured as the difference, measured in milliseconds, between the current time
-       * and midnight, January 1, 1970 UTC.
+       * The replicationLoadSink for the replication sink status of this region server.
        * 
*/ - public boolean hasReportStartTime() { - return ((bitField0_ & 0x00000040) == 0x00000040); + public boolean hasReplLoadSink() { + return ((bitField0_ & 0x00000400) == 0x00000400); } /** - * optional uint64 report_start_time = 7; + * optional .ReplicationLoadSink replLoadSink = 11; * *
        **
-       * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-       * time is measured as the difference, measured in milliseconds, between the current time
-       * and midnight, January 1, 1970 UTC.
+       * The replicationLoadSink for the replication sink status of this region server.
        * 
*/ - public long getReportStartTime() { - return reportStartTime_; + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink() { + if (replLoadSinkBuilder_ == null) { + return replLoadSink_; + } else { + return replLoadSinkBuilder_.getMessage(); + } } /** - * optional uint64 report_start_time = 7; + * optional .ReplicationLoadSink replLoadSink = 11; * *
        **
-       * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-       * time is measured as the difference, measured in milliseconds, between the current time
-       * and midnight, January 1, 1970 UTC.
+       * The replicationLoadSink for the replication sink status of this region server.
        * 
*/ - public Builder setReportStartTime(long value) { - bitField0_ |= 0x00000040; - reportStartTime_ = value; - onChanged(); + public Builder setReplLoadSink(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink value) { + if (replLoadSinkBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + replLoadSink_ = value; + onChanged(); + } else { + replLoadSinkBuilder_.setMessage(value); + } + bitField0_ |= 0x00000400; return this; } /** - * optional uint64 report_start_time = 7; + * optional .ReplicationLoadSink replLoadSink = 11; * *
        **
-       * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-       * time is measured as the difference, measured in milliseconds, between the current time
-       * and midnight, January 1, 1970 UTC.
+       * The replicationLoadSink for the replication sink status of this region server.
        * 
*/ - public Builder clearReportStartTime() { - bitField0_ = (bitField0_ & ~0x00000040); - reportStartTime_ = 0L; - onChanged(); + public Builder setReplLoadSink( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder builderForValue) { + if (replLoadSinkBuilder_ == null) { + replLoadSink_ = builderForValue.build(); + onChanged(); + } else { + replLoadSinkBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000400; return this; } - - // optional uint64 report_end_time = 8; - private long reportEndTime_ ; - /** - * optional uint64 report_end_time = 8; - * - *
-       **
-       * Time when report was generated.
-       * time is measured as the difference, measured in milliseconds, between the current time
-       * and midnight, January 1, 1970 UTC.
-       * 
- */ - public boolean hasReportEndTime() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional uint64 report_end_time = 8; - * - *
-       **
-       * Time when report was generated.
-       * time is measured as the difference, measured in milliseconds, between the current time
-       * and midnight, January 1, 1970 UTC.
-       * 
- */ - public long getReportEndTime() { - return reportEndTime_; - } /** - * optional uint64 report_end_time = 8; + * optional .ReplicationLoadSink replLoadSink = 11; * *
        **
-       * Time when report was generated.
-       * time is measured as the difference, measured in milliseconds, between the current time
-       * and midnight, January 1, 1970 UTC.
+       * The replicationLoadSink for the replication sink status of this region server.
        * 
*/ - public Builder setReportEndTime(long value) { - bitField0_ |= 0x00000080; - reportEndTime_ = value; - onChanged(); + public Builder mergeReplLoadSink(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink value) { + if (replLoadSinkBuilder_ == null) { + if (((bitField0_ & 0x00000400) == 0x00000400) && + replLoadSink_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance()) { + replLoadSink_ = + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder(replLoadSink_).mergeFrom(value).buildPartial(); + } else { + replLoadSink_ = value; + } + onChanged(); + } else { + replLoadSinkBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000400; return this; } /** - * optional uint64 report_end_time = 8; + * optional .ReplicationLoadSink replLoadSink = 11; * *
        **
-       * Time when report was generated.
-       * time is measured as the difference, measured in milliseconds, between the current time
-       * and midnight, January 1, 1970 UTC.
+       * The replicationLoadSink for the replication sink status of this region server.
        * 
*/ - public Builder clearReportEndTime() { - bitField0_ = (bitField0_ & ~0x00000080); - reportEndTime_ = 0L; - onChanged(); + public Builder clearReplLoadSink() { + if (replLoadSinkBuilder_ == null) { + replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); + onChanged(); + } else { + replLoadSinkBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000400); return this; } - - // optional uint32 info_server_port = 9; - private int infoServerPort_ ; - /** - * optional uint32 info_server_port = 9; - * - *
-       **
-       * The port number that this region server is hosing an info server on.
-       * 
- */ - public boolean hasInfoServerPort() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } /** - * optional uint32 info_server_port = 9; + * optional .ReplicationLoadSink replLoadSink = 11; * *
        **
-       * The port number that this region server is hosing an info server on.
+       * The replicationLoadSink for the replication sink status of this region server.
        * 
*/ - public int getInfoServerPort() { - return infoServerPort_; + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder getReplLoadSinkBuilder() { + bitField0_ |= 0x00000400; + onChanged(); + return getReplLoadSinkFieldBuilder().getBuilder(); } /** - * optional uint32 info_server_port = 9; + * optional .ReplicationLoadSink replLoadSink = 11; * *
        **
-       * The port number that this region server is hosing an info server on.
+       * The replicationLoadSink for the replication sink status of this region server.
        * 
*/ - public Builder setInfoServerPort(int value) { - bitField0_ |= 0x00000100; - infoServerPort_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder() { + if (replLoadSinkBuilder_ != null) { + return replLoadSinkBuilder_.getMessageOrBuilder(); + } else { + return replLoadSink_; + } } /** - * optional uint32 info_server_port = 9; + * optional .ReplicationLoadSink replLoadSink = 11; * *
        **
-       * The port number that this region server is hosing an info server on.
+       * The replicationLoadSink for the replication sink status of this region server.
        * 
*/ - public Builder clearInfoServerPort() { - bitField0_ = (bitField0_ & ~0x00000100); - infoServerPort_ = 0; - onChanged(); - return this; + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder> + getReplLoadSinkFieldBuilder() { + if (replLoadSinkBuilder_ == null) { + replLoadSinkBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder>( + replLoadSink_, + getParentForChildren(), + isClean()); + replLoadSink_ = null; + } + return replLoadSinkBuilder_; } // @@protoc_insertion_point(builder_scope:ServerLoad) @@ -10315,6 +12580,16 @@ public final class ClusterStatusProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionLoad_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_ReplicationLoadSink_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ReplicationLoadSink_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ReplicationLoadSource_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ReplicationLoadSource_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_ServerLoad_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -10361,27 +12636,35 @@ public final class ClusterStatusProtos { "ompacted_KVs\030\013 \001(\004\022\032\n\022root_index_size_KB" + "\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" + "(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" + - "\n\024complete_sequence_id\030\017 \001(\004\"\212\002\n\nServerL" + - "oad\022\032\n\022number_of_requests\030\001 \001(\r\022 \n\030total" + - "_number_of_requests\030\002 \001(\r\022\024\n\014used_heap_M" + - "B\030\003 \001(\r\022\023\n\013max_heap_MB\030\004 \001(\r\022!\n\014region_l" + - "oads\030\005 \003(\0132\013.RegionLoad\022\"\n\014coprocessors\030" + - "\006 \003(\0132\014.Coprocessor\022\031\n\021report_start_time" + - "\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001(\004\022\030\n\020info_", - "server_port\030\t \001(\r\"O\n\016LiveServerInfo\022\033\n\006s" + - "erver\030\001 \002(\0132\013.ServerName\022 \n\013server_load\030" + - "\002 \002(\0132\013.ServerLoad\"\340\002\n\rClusterStatus\022/\n\r" + - "hbase_version\030\001 \001(\0132\030.HBaseVersionFileCo" + - "ntent\022%\n\014live_servers\030\002 \003(\0132\017.LiveServer" + - "Info\022!\n\014dead_servers\030\003 \003(\0132\013.ServerName\022" + - "2\n\025regions_in_transition\030\004 \003(\0132\023.RegionI" + - "nTransition\022\036\n\ncluster_id\030\005 \001(\0132\n.Cluste" + - "rId\022)\n\023master_coprocessors\030\006 \003(\0132\014.Copro" + - "cessor\022\033\n\006master\030\007 \001(\0132\013.ServerName\022#\n\016b", - "ackup_masters\030\010 \003(\0132\013.ServerName\022\023\n\013bala" + - "ncer_on\030\t \001(\010BF\n*org.apache.hadoop.hbase" + - ".protobuf.generatedB\023ClusterStatusProtos" + 
- "H\001\240\001\001" + "\n\024complete_sequence_id\030\017 \001(\004\"T\n\023Replicat" + + "ionLoadSink\022\032\n\022ageOfLastAppliedOp\030\001 \002(\004\022" + + "!\n\031timeStampsOfLastAppliedOp\030\002 \002(\004\"\225\001\n\025R" + + "eplicationLoadSource\022\016\n\006peerID\030\001 \002(\t\022\032\n\022" + + "ageOfLastShippedOp\030\002 \002(\004\022\026\n\016sizeOfLogQue" + + "ue\030\003 \002(\r\022 \n\030timeStampOfLastShippedOp\030\004 \002" + + "(\004\022\026\n\016replicationLag\030\005 \002(\004\"\346\002\n\nServerLoa", + "d\022\032\n\022number_of_requests\030\001 \001(\r\022 \n\030total_n" + + "umber_of_requests\030\002 \001(\r\022\024\n\014used_heap_MB\030" + + "\003 \001(\r\022\023\n\013max_heap_MB\030\004 \001(\r\022!\n\014region_loa" + + "ds\030\005 \003(\0132\013.RegionLoad\022\"\n\014coprocessors\030\006 " + + "\003(\0132\014.Coprocessor\022\031\n\021report_start_time\030\007" + + " \001(\004\022\027\n\017report_end_time\030\010 \001(\004\022\030\n\020info_se" + + "rver_port\030\t \001(\r\022.\n\016replLoadSource\030\n \003(\0132" + + "\026.ReplicationLoadSource\022*\n\014replLoadSink\030" + + "\013 \001(\0132\024.ReplicationLoadSink\"O\n\016LiveServe" + + "rInfo\022\033\n\006server\030\001 \002(\0132\013.ServerName\022 \n\013se", + "rver_load\030\002 \002(\0132\013.ServerLoad\"\340\002\n\rCluster" + + "Status\022/\n\rhbase_version\030\001 \001(\0132\030.HBaseVer" + + "sionFileContent\022%\n\014live_servers\030\002 \003(\0132\017." + + "LiveServerInfo\022!\n\014dead_servers\030\003 \003(\0132\013.S" + + "erverName\0222\n\025regions_in_transition\030\004 \003(\013" + + "2\023.RegionInTransition\022\036\n\ncluster_id\030\005 \001(" + + "\0132\n.ClusterId\022)\n\023master_coprocessors\030\006 \003" + + "(\0132\014.Coprocessor\022\033\n\006master\030\007 \001(\0132\013.Serve" + + "rName\022#\n\016backup_masters\030\010 \003(\0132\013.ServerNa" + + "me\022\023\n\013balancer_on\030\t \001(\010BF\n*org.apache.ha", + "doop.hbase.protobuf.generatedB\023ClusterSt" + + "atusProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -10406,20 +12689,32 @@ public final class ClusterStatusProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionLoad_descriptor, new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", }); - internal_static_ServerLoad_descriptor = + internal_static_ReplicationLoadSink_descriptor = getDescriptor().getMessageTypes().get(3); + internal_static_ReplicationLoadSink_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ReplicationLoadSink_descriptor, + new java.lang.String[] { "AgeOfLastAppliedOp", "TimeStampsOfLastAppliedOp", }); + internal_static_ReplicationLoadSource_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_ReplicationLoadSource_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ReplicationLoadSource_descriptor, + new java.lang.String[] { "PeerID", "AgeOfLastShippedOp", "SizeOfLogQueue", "TimeStampOfLastShippedOp", "ReplicationLag", }); + internal_static_ServerLoad_descriptor = + 
getDescriptor().getMessageTypes().get(5); internal_static_ServerLoad_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ServerLoad_descriptor, - new java.lang.String[] { "NumberOfRequests", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", "ReportStartTime", "ReportEndTime", "InfoServerPort", }); + new java.lang.String[] { "NumberOfRequests", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", "ReportStartTime", "ReportEndTime", "InfoServerPort", "ReplLoadSource", "ReplLoadSink", }); internal_static_LiveServerInfo_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(6); internal_static_LiveServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_LiveServerInfo_descriptor, new java.lang.String[] { "Server", "ServerLoad", }); internal_static_ClusterStatus_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(7); internal_static_ClusterStatus_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ClusterStatus_descriptor, diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto index dbf00dc..93e9b0c 100644 --- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto +++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto @@ -113,6 +113,18 @@ message RegionLoad { } /* Server-level protobufs */ +message ReplicationLoadSink { + required uint64 ageOfLastAppliedOp = 1; + required uint64 timeStampsOfLastAppliedOp = 2; +} + +message ReplicationLoadSource { + required string peerID = 1; + required uint64 ageOfLastShippedOp = 2; + required uint32 sizeOfLogQueue = 3; + required uint64 timeStampOfLastShippedOp = 4; + required uint64 replicationLag = 5; +} message ServerLoad { /** Number of requests since last report. */ @@ -155,6 +167,16 @@ message ServerLoad { * The port number that this region server is hosing an info server on. */ optional uint32 info_server_port = 9; + + /** + * The replicationLoadSource for the replication Source status of this region server . + */ + repeated ReplicationLoadSource replLoadSource = 10; + + /** + * The replicationLoadSink for the replication Sink status of this region server . 
+ */ + optional ReplicationLoadSink replLoadSink = 11; } message LiveServerInfo { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 7947c4a..f380a3f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -147,6 +147,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NoNodeException; import org.apache.zookeeper.data.Stat; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -1001,6 +1002,7 @@ public class HRegionServer extends HasThread implements } } + ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) { // We're getting the MetricsRegionServerWrapper here because the wrapper computes requests // per second, and other metrics As long as metrics are part of ServerLoad it's best to use @@ -1037,6 +1039,21 @@ public class HRegionServer extends HasThread implements } else { serverLoad.setInfoServerPort(-1); } + + // for the replicationLoad purpose. Only need to get from one service + // either source or sink will get the same info + ReplicationSourceService rsources = getReplicationSourceService(); + + if (rsources != null) { + // always refresh first to get the latest value + ReplicationLoad rLoad = rsources.refreshAndGetReplicationLoad(); + if (rLoad != null) { + serverLoad.setReplLoadSink(rLoad.getReplicationLoadSink()); + for (ClusterStatusProtos.ReplicationLoadSource rLS : rLoad.getReplicationLoadSourceList()) { + serverLoad.addReplLoadSource(rLS); + } + } + } return serverLoad.build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index cef7b46..f001ae5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; /** * Gateway to Cluster Replication. @@ -52,4 +53,10 @@ public interface ReplicationService { * Stops replication service. 
   */
  void stopReplicationService();
+
+
+  /**
+   * Refresh and get the current ReplicationLoad
+   */
+  public ReplicationLoad refreshAndGetReplicationLoad();
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java
index d649edf..9fff02f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java
@@ -36,6 +36,8 @@ public class MetricsSink {
   private MetricsReplicationSource rms;
   private long lastTimestampForAge = System.currentTimeMillis();
 
+  private long age;
+
   public MetricsSink() {
     rms = CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class);
   }
@@ -47,10 +49,9 @@ public class MetricsSink {
    * @return the age that was set
    */
  public long setAgeOfLastAppliedOp(long timestamp) {
-    long age = 0;
    if (lastTimestampForAge != timestamp) {
      lastTimestampForAge = timestamp;
-      age = System.currentTimeMillis() - lastTimestampForAge;
+      this.age = System.currentTimeMillis() - lastTimestampForAge;
    }
    rms.setGauge(SINK_AGE_OF_LAST_APPLIED_OP, age);
    return age;
@@ -75,4 +76,20 @@ public class MetricsSink {
     rms.incCounters(SINK_APPLIED_OPS, batchSize);
   }
 
+  /**
+   * Get the Age of Last Applied Op
+   * @return ageOfLastAppliedOp
+   */
+  public long getAgeOfLastAppliedOp() {
+    return this.age;
+  }
+
+  /**
+   * Get the TimeStampOfLastAppliedOp. If no replication op has been applied yet, the value is
+   * the timestamp at which the hbase instance started.
+   * @return timeStampsOfLastAppliedOp
+   */
+  public long getTimeStampOfLastAppliedOp() {
+    return this.lastTimestampForAge;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
index eadaead..1b1fda6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
@@ -46,6 +46,7 @@ public class MetricsSource {
 
   private long lastTimestamp = 0;
   private int lastQueueSize = 0;
+  private long age = 0;
 
   private String sizeOfLogQueKey;
   private String ageOfLastShippedOpKey;
@@ -83,7 +84,7 @@ public class MetricsSource {
    * @param timestamp write time of the edit
    */
  public void setAgeOfLastShippedOp(long timestamp) {
-    long age = EnvironmentEdgeManager.currentTime() - timestamp;
+    this.age = EnvironmentEdgeManager.currentTime() - timestamp;
    rms.setGauge(ageOfLastShippedOpKey, age);
    rms.setGauge(SOURCE_AGE_OF_LAST_SHIPPED_OP, age);
    this.lastTimestamp = timestamp;
@@ -141,6 +142,38 @@ public class MetricsSource {
   }
 
   /**
+   * Get the ageOfLastShippedOp
+   * @return ageOfLastShippedOp
+   */
+  public Long getAgeOfLastShippedOp() {
+    return this.age;
+  }
+
+  /**
+   * Get the sizeOfLogQueue
+   * @return sizeOfLogQueue
+   */
+  public int getSizeOfLogQueue() {
+    return this.lastQueueSize;
+  }
+
+  /**
+   * Get the timeStampOfLastShippedOp
+   * @return lastTimestamp
+   */
+  public long getTimeStampOfLastShippedOp() {
+    return lastTimestamp;
+  }
+
+  /**
+   * Get the slave peer ID
+   * @return peerID
+   */
+  public String getPeerID() {
+    return this.id;
+  }
+
+  /**
    * Convience method to apply changes to metrics do to shipping a batch of logs.
    *
    * @param batchSize the size of the batch that was shipped to sinks.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
index f06ddef..a633b94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.hbase.HConstants.REPLICATION_ENABLE_KEY;
 import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableMap;
 import java.util.TreeMap;
@@ -79,6 +80,8 @@ public class Replication implements WALActionsListener,
   private ReplicationSink replicationSink;
   // Hosting server
   private Server server;
+  // ReplicationLoad to access replication metrics
+  private ReplicationLoad replicationLoad;
   /** Statistics thread schedule pool */
   private ScheduledExecutorService scheduleThreadPool;
   private int statsThreadPeriod;
@@ -135,6 +138,7 @@ public class Replication implements WALActionsListener,
       this.replicationManager = new ReplicationSourceManager(replicationQueues, replicationPeers,
           replicationTracker, conf, this.server, fs, logDir, oldLogDir, clusterId);
+      this.replicationLoad = new ReplicationLoad();
       this.statsThreadPeriod =
           this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60);
       LOG.debug("ReplicationStatisticsThread " + this.statsThreadPeriod);
@@ -143,6 +147,7 @@ public class Replication implements WALActionsListener,
       this.replicationQueues = null;
       this.replicationPeers = null;
       this.replicationTracker = null;
+      this.replicationLoad = null;
     }
   }
 
@@ -309,6 +314,34 @@ public class Replication implements WALActionsListener,
     // not interested
   }
 
+  private void buildReplicationLoad() {
+
+    // get source
+    List sources = this.replicationManager.getSources();
+    List sourceMetricsList = new ArrayList();
+
+    for (ReplicationSourceInterface source : sources) {
+      if (source instanceof ReplicationSource) {
+        sourceMetricsList.add(((ReplicationSource) source).getSourceMetrics());
+      }
+    }
+
+    // get sink
+    MetricsSink sinkMetrics = this.replicationSink.getSinkMetrics();
+
+    this.replicationLoad.buildReplicationLoad(sourceMetricsList, sinkMetrics);
+
+  }
+
+  @Override
+  public ReplicationLoad refreshAndGetReplicationLoad() {
+    if (this.replicationLoad == null) return null;
+
+    // always rebuild first so the caller gets the latest data
+    buildReplicationLoad();
+    return this.replicationLoad;
+  }
+
  /*
   * Statistics thread. Periodically prints the cache statistics to the log.
   */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java
new file mode 100644
index 0000000..8849598
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java
@@ -0,0 +1,157 @@
+/**
+ * Copyright 2014 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Date;
+import java.util.List;
+import java.util.ArrayList;
+
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsSink;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Strings;
+
+/**
+ * This class is used for exporting some of the info from replication metrics
+ */
+public class ReplicationLoad {
+
+  // Empty load instance.
+  public static final ReplicationLoad EMPTY_REPLICATIONLOAD = new ReplicationLoad();
+
+  private List sourceMetricsList;
+  private MetricsSink sinkMetrics;
+
+  private List replicationLoadSourceList;
+  private ClusterStatusProtos.ReplicationLoadSink replicationLoadSink;
+
+  /** default constructor */
+  public ReplicationLoad() {
+    super();
+  }
+
+  /**
+   * buildReplicationLoad
+   * @param srMetricsList the list of source metrics to export
+   * @param skMetrics the sink metrics to export
+   */
+
+  public void buildReplicationLoad(final List srMetricsList,
+      final MetricsSink skMetrics) {
+    this.sourceMetricsList = srMetricsList;
+    this.sinkMetrics = skMetrics;
+
+    // build the SinkLoad
+    ClusterStatusProtos.ReplicationLoadSink.Builder rLoadSinkBuild =
+        ClusterStatusProtos.ReplicationLoadSink.newBuilder();
+    rLoadSinkBuild.setAgeOfLastAppliedOp(sinkMetrics.getAgeOfLastAppliedOp());
+    rLoadSinkBuild.setTimeStampsOfLastAppliedOp(sinkMetrics.getTimeStampOfLastAppliedOp());
+    this.replicationLoadSink = rLoadSinkBuild.build();
+
+    // build the SourceLoad List
+    this.replicationLoadSourceList = new ArrayList();
+    for (MetricsSource sm : this.sourceMetricsList) {
+      long ageOfLastShippedOp = sm.getAgeOfLastShippedOp();
+      int sizeOfLogQueue = sm.getSizeOfLogQueue();
+      long timeStampOfLastShippedOp = sm.getTimeStampOfLastShippedOp();
+      long replicationLag;
+      long timePassedAfterLastShippedOp =
+          EnvironmentEdgeManager.currentTime() - timeStampOfLastShippedOp;
+      if (sizeOfLogQueue != 0) {
+        // err on the large side
+        replicationLag = Math.max(ageOfLastShippedOp, timePassedAfterLastShippedOp);
+      } else if (timePassedAfterLastShippedOp < 2 * ageOfLastShippedOp) {
+        replicationLag = ageOfLastShippedOp; // the last shipment happened recently
+      } else {
+        // the last shipment may have happened a long time ago (e.g. last night),
+        // so there is NO real lag although ageOfLastShippedOp is non-zero
+        replicationLag = 0;
+      }
+
+      ClusterStatusProtos.ReplicationLoadSource.Builder rLoadSourceBuild =
+          ClusterStatusProtos.ReplicationLoadSource.newBuilder();
+      rLoadSourceBuild.setPeerID(sm.getPeerID());
+      rLoadSourceBuild.setAgeOfLastShippedOp(ageOfLastShippedOp);
+      rLoadSourceBuild.setSizeOfLogQueue(sizeOfLogQueue);
+      rLoadSourceBuild.setTimeStampOfLastShippedOp(timeStampOfLastShippedOp);
+      rLoadSourceBuild.setReplicationLag(replicationLag);
+
+      this.replicationLoadSourceList.add(rLoadSourceBuild.build());
+    }
+
+  }
+
+  /**
+   * sourceToString
+   * @return a string containing the sourceReplicationLoad information
+   */
+  public String sourceToString() {
+    if (this.sourceMetricsList == null) return null;
+
+    StringBuilder sb = new StringBuilder();
+
+    for (ClusterStatusProtos.ReplicationLoadSource rls : this.replicationLoadSourceList) {
+
+      sb = Strings.appendKeyValue(sb, "\n PeerID", rls.getPeerID());
+      sb = Strings.appendKeyValue(sb, "AgeOfLastShippedOp", rls.getAgeOfLastShippedOp());
+      sb = Strings.appendKeyValue(sb, "SizeOfLogQueue", rls.getSizeOfLogQueue());
+      sb =
+          Strings.appendKeyValue(sb, "TimeStampsOfLastShippedOp", (new Date(
+              rls.getTimeStampOfLastShippedOp()).toString()));
+      sb = Strings.appendKeyValue(sb, "Replication Lag", rls.getReplicationLag());
+    }
+
+    return sb.toString();
+  }
+
+  /**
+   * sinkToString
+   * @return a string containing the sinkReplicationLoad information
+   */
+  public String sinkToString() {
+    if (this.replicationLoadSink == null) return null;
+
+    StringBuilder sb = new StringBuilder();
+    sb = Strings.appendKeyValue(sb, "AgeOfLastAppliedOp",
+        this.replicationLoadSink.getAgeOfLastAppliedOp());
+    sb = Strings.appendKeyValue(sb, "TimeStampsOfLastAppliedOp",
+        (new Date(this.replicationLoadSink.getTimeStampsOfLastAppliedOp()).toString()));
+
+    return sb.toString();
+  }
+
+  public ClusterStatusProtos.ReplicationLoadSink getReplicationLoadSink() {
+    return this.replicationLoadSink;
+  }
+
+  public List getReplicationLoadSourceList() {
+    return this.replicationLoadSourceList;
+  }
+
+  /**
+   * @see java.lang.Object#toString()
+   */
+  @Override
+  public String toString() {
+    return this.sourceToString() + System.getProperty("line.separator") + this.sinkToString();
+  }
+
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index 4a4de86..81f773d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -255,4 +255,12 @@ public class ReplicationSink {
       "age in ms of last applied edit: " + this.metrics.refreshAgeOfLastAppliedOp() +
       ", total replicated edits: " + this.totalReplicatedEdits;
   }
+
+  /**
+   * Get replication Sink Metrics
+   * @return MetricsSink
+   */
+  public MetricsSink getSinkMetrics() {
+    return this.metrics;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index aadcc5d..993bfe6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -853,6 +853,14 @@ public class ReplicationSource extends Thread
     }
   }
 
+  /**
+   * Get Replication Source Metrics
+   * @return sourceMetrics
+   */
+  public MetricsSource getSourceMetrics() {
+    return this.metrics;
+  }
+
   @Override
   public String getStats() {
     long position = this.repLogReader.getPosition();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
index 764f01b..46b26bf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
@@ -46,6 +46,9 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -552,4 +555,48 @@ public class TestReplicationSmallTests extends TestReplicationBase {
     hadmin.close();
   }
 
+  /**
+   * Test for HBASE-9531:
+   * put a few rows into htable1, which should be replicated to htable2;
+   * create a ClusterStatus instance 'status' from HBaseAdmin;
+   * test : status.getLoad(server).getReplicationLoadSourceList()
+   * test : status.getLoad(server).getReplicationLoadSink()
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testReplicationStatus() throws Exception {
+    LOG.info("testReplicationStatus");
+
+    HBaseAdmin hadmin = new HBaseAdmin(conf1);
+
+    final byte[] qualName = Bytes.toBytes("q");
+    Put p;
+
+    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+      p = new Put(Bytes.toBytes("row" + i));
+      p.add(famName, qualName, Bytes.toBytes("val" + i));
+      htable1.put(p);
+    }
+
+    ClusterStatus status = hadmin.getClusterStatus();
+
+    for (ServerName server : status.getServers()) {
+      ServerLoad sl = status.getLoad(server);
+      List rLoadSourceList =
+          sl.getReplicationLoadSourceList();
+      ReplicationLoadSink rLoadSink =
+          sl.getReplicationLoadSink();
+
+      // check SourceList has at least one entry
+      assertTrue("failed to get ReplicationLoadSourceList", (rLoadSourceList.size() > 0));
+
+      // only check that the Sink values exist, as it is difficult to verify them on the fly
+      assertTrue("failed to get ReplicationLoadSink.AgeOfLastAppliedOp ",
+          (rLoadSink.getAgeOfLastAppliedOp() >= 0));
+      assertTrue("failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp ",
+          (rLoadSink.getTimeStampsOfLastAppliedOp() >= 0));
+    }
+    hadmin.close();
+  }
+
 }
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index d74d229..08937bd 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -560,7 +560,7 @@ module Hbase
       end
     end
 
-    def status(format)
+    def status(format, type)
       status = @admin.getClusterStatus()
       if format == "detailed"
         puts("version %s" % [ status.getHBaseVersion() ])
@@ -587,6 +587,47 @@ module Hbase
         for server in status.getDeadServerNames()
           puts(" %s" % [ server ])
         end
+      elsif format == "replication"
+        # check whether replication is enabled or not
+        if (!@admin.getConfiguration().getBoolean(org.apache.hadoop.hbase.HConstants::REPLICATION_ENABLE_KEY, org.apache.hadoop.hbase.HConstants::REPLICATION_ENABLE_DEFAULT))
+          puts("Please enable replication first")
+        else
+          puts("version %s" % [ status.getHBaseVersion() ])
+          puts("%d live servers" % [ status.getServersSize() ])
+
+          for server in status.getServers()
+            sl = status.getLoad(server)
+            rSinkString = " SINK :"
+            rSourceString = " SOURCE:"
+
+            rLoadSink = sl.getReplicationLoadSink()
+            rSinkString << "AgeOfLastAppliedOp=" + rLoadSink.getAgeOfLastAppliedOp().to_s
+            rSinkString << ", TimeStampsOfLastAppliedOp=" + (java.util.Date.new(rLoadSink.getTimeStampsOfLastAppliedOp())).toString()
+
+            rLoadSourceList = sl.getReplicationLoadSourceList()
+            index = 0
+            while index < rLoadSourceList.size()
+              rLoadSource = rLoadSourceList.get(index)
+              rSourceString << "\n PeerID=" + rLoadSource.getPeerID()
+              rSourceString << ", AgeOfLastShippedOp=" + rLoadSource.getAgeOfLastShippedOp().to_s
+              rSourceString << ", SizeOfLogQueue=" + rLoadSource.getSizeOfLogQueue().to_s
+              rSourceString << ", TimeStampsOfLastShippedOp=" + (java.util.Date.new(rLoadSource.getTimeStampOfLastShippedOp())).toString()
+              rSourceString << ", Replication Lag=" + rLoadSource.getReplicationLag().to_s
+              index = index + 1
+            end
+            puts(" %s:" %
+              [ server.getHostname() ])
+            if type.casecmp("SOURCE") == 0
+              puts("%s" % rSourceString)
+            elsif type.casecmp("SINK") == 0
+              puts("%s" % rSinkString)
+            else
+              puts("%s" % rSourceString)
+              puts("%s" % rSinkString)
+            end
+          end
+
+        end
       elsif format == "simple"
         load = 0
         regions = 0
diff --git a/hbase-shell/src/main/ruby/shell/commands/status.rb b/hbase-shell/src/main/ruby/shell/commands/status.rb
index f72c13c..4654b4a 100644
--- a/hbase-shell/src/main/ruby/shell/commands/status.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/status.rb
@@ -22,18 +22,22 @@ module Shell
     class Status < Command
       def help
         return <<-EOF
-Show cluster status. Can be 'summary', 'simple', or 'detailed'. The
+Show cluster status. Can be 'summary', 'simple', 'detailed', or 'replication'. The
 default is 'summary'. Examples:
 
   hbase> status
   hbase> status 'simple'
   hbase> status 'summary'
   hbase> status 'detailed'
+  hbase> status 'replication'
+  hbase> status 'replication', 'source'
+  hbase> status 'replication', 'sink'
+
 EOF
       end
 
-      def command(format = 'summary')
-        admin.status(format)
+      def command(format = 'summary', type = 'both')
+        admin.status(format, type)
       end
     end
   end