diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java index 2fad51a..4bf67a1 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java @@ -8875,6 +8875,970 @@ public final class AdminProtos { // @@protoc_insertion_point(class_scope:CompactRegionResponse) } + public interface UpdateFavoredNodesRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .OpenRegionRequest.RegionOpenInfo updateRegionInfo = 1; + java.util.List + getUpdateRegionInfoList(); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo getUpdateRegionInfo(int index); + int getUpdateRegionInfoCount(); + java.util.List + getUpdateRegionInfoOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder getUpdateRegionInfoOrBuilder( + int index); + } + public static final class UpdateFavoredNodesRequest extends + com.google.protobuf.GeneratedMessage + implements UpdateFavoredNodesRequestOrBuilder { + // Use UpdateFavoredNodesRequest.newBuilder() to construct. + private UpdateFavoredNodesRequest(Builder builder) { + super(builder); + } + private UpdateFavoredNodesRequest(boolean noInit) {} + + private static final UpdateFavoredNodesRequest defaultInstance; + public static UpdateFavoredNodesRequest getDefaultInstance() { + return defaultInstance; + } + + public UpdateFavoredNodesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_fieldAccessorTable; + } + + // repeated .OpenRegionRequest.RegionOpenInfo updateRegionInfo = 1; + public static final int UPDATEREGIONINFO_FIELD_NUMBER = 1; + private java.util.List updateRegionInfo_; + public java.util.List getUpdateRegionInfoList() { + return updateRegionInfo_; + } + public java.util.List + getUpdateRegionInfoOrBuilderList() { + return updateRegionInfo_; + } + public int getUpdateRegionInfoCount() { + return updateRegionInfo_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo getUpdateRegionInfo(int index) { + return updateRegionInfo_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder getUpdateRegionInfoOrBuilder( + int index) { + return updateRegionInfo_.get(index); + } + + private void initFields() { + updateRegionInfo_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getUpdateRegionInfoCount(); i++) { + if (!getUpdateRegionInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { 
+ getSerializedSize(); + for (int i = 0; i < updateRegionInfo_.size(); i++) { + output.writeMessage(1, updateRegionInfo_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < updateRegionInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, updateRegionInfo_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest) obj; + + boolean result = true; + result = result && getUpdateRegionInfoList() + .equals(other.getUpdateRegionInfoList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getUpdateRegionInfoCount() > 0) { + hash = (37 * hash) + UPDATEREGIONINFO_FIELD_NUMBER; + hash = (53 * hash) + getUpdateRegionInfoList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUpdateRegionInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (updateRegionInfoBuilder_ == null) { + updateRegionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + updateRegionInfoBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDescriptor(); + } + + public 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest(this); + int from_bitField0_ = bitField0_; + if (updateRegionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + updateRegionInfo_ = java.util.Collections.unmodifiableList(updateRegionInfo_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.updateRegionInfo_ = updateRegionInfo_; + } else { + result.updateRegionInfo_ = updateRegionInfoBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance()) return this; + if (updateRegionInfoBuilder_ == null) { + if (!other.updateRegionInfo_.isEmpty()) { + if (updateRegionInfo_.isEmpty()) { + updateRegionInfo_ = other.updateRegionInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureUpdateRegionInfoIsMutable(); + updateRegionInfo_.addAll(other.updateRegionInfo_); + } + onChanged(); + } + } else { + if (!other.updateRegionInfo_.isEmpty()) { + if (updateRegionInfoBuilder_.isEmpty()) { + updateRegionInfoBuilder_.dispose(); + updateRegionInfoBuilder_ = null; + updateRegionInfo_ = other.updateRegionInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + updateRegionInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getUpdateRegionInfoFieldBuilder() : null; + } else { + updateRegionInfoBuilder_.addAllMessages(other.updateRegionInfo_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getUpdateRegionInfoCount(); i++) { + if (!getUpdateRegionInfo(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addUpdateRegionInfo(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // repeated .OpenRegionRequest.RegionOpenInfo updateRegionInfo = 1; + private java.util.List updateRegionInfo_ = + java.util.Collections.emptyList(); + private void ensureUpdateRegionInfoIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + updateRegionInfo_ = new java.util.ArrayList(updateRegionInfo_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder> updateRegionInfoBuilder_; + + public java.util.List getUpdateRegionInfoList() { + if (updateRegionInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(updateRegionInfo_); + } else { + return updateRegionInfoBuilder_.getMessageList(); + } + } + public int getUpdateRegionInfoCount() { + if (updateRegionInfoBuilder_ == null) { + return updateRegionInfo_.size(); + } else { + return updateRegionInfoBuilder_.getCount(); + } + } + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo getUpdateRegionInfo(int index) { + if (updateRegionInfoBuilder_ == null) { + return updateRegionInfo_.get(index); + } else { + return updateRegionInfoBuilder_.getMessage(index); + } + } + public Builder setUpdateRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo value) { + if (updateRegionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUpdateRegionInfoIsMutable(); + updateRegionInfo_.set(index, value); + onChanged(); + } else { + updateRegionInfoBuilder_.setMessage(index, value); + } + return this; + } + public Builder setUpdateRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder builderForValue) { + if (updateRegionInfoBuilder_ == null) { + ensureUpdateRegionInfoIsMutable(); + 
updateRegionInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + updateRegionInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addUpdateRegionInfo(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo value) { + if (updateRegionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUpdateRegionInfoIsMutable(); + updateRegionInfo_.add(value); + onChanged(); + } else { + updateRegionInfoBuilder_.addMessage(value); + } + return this; + } + public Builder addUpdateRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo value) { + if (updateRegionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUpdateRegionInfoIsMutable(); + updateRegionInfo_.add(index, value); + onChanged(); + } else { + updateRegionInfoBuilder_.addMessage(index, value); + } + return this; + } + public Builder addUpdateRegionInfo( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder builderForValue) { + if (updateRegionInfoBuilder_ == null) { + ensureUpdateRegionInfoIsMutable(); + updateRegionInfo_.add(builderForValue.build()); + onChanged(); + } else { + updateRegionInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addUpdateRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder builderForValue) { + if (updateRegionInfoBuilder_ == null) { + ensureUpdateRegionInfoIsMutable(); + updateRegionInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + updateRegionInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllUpdateRegionInfo( + java.lang.Iterable values) { + if (updateRegionInfoBuilder_ == null) { + ensureUpdateRegionInfoIsMutable(); + super.addAll(values, updateRegionInfo_); + onChanged(); + } else { + updateRegionInfoBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearUpdateRegionInfo() { + if (updateRegionInfoBuilder_ == null) { + updateRegionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + updateRegionInfoBuilder_.clear(); + } + return this; + } + public Builder removeUpdateRegionInfo(int index) { + if (updateRegionInfoBuilder_ == null) { + ensureUpdateRegionInfoIsMutable(); + updateRegionInfo_.remove(index); + onChanged(); + } else { + updateRegionInfoBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder getUpdateRegionInfoBuilder( + int index) { + return getUpdateRegionInfoFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder getUpdateRegionInfoOrBuilder( + int index) { + if (updateRegionInfoBuilder_ == null) { + return updateRegionInfo_.get(index); } else { + return updateRegionInfoBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getUpdateRegionInfoOrBuilderList() { + if (updateRegionInfoBuilder_ != null) { + return updateRegionInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(updateRegionInfo_); + } + } + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder 
addUpdateRegionInfoBuilder() { + return getUpdateRegionInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.getDefaultInstance()); + } + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder addUpdateRegionInfoBuilder( + int index) { + return getUpdateRegionInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.getDefaultInstance()); + } + public java.util.List + getUpdateRegionInfoBuilderList() { + return getUpdateRegionInfoFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder> + getUpdateRegionInfoFieldBuilder() { + if (updateRegionInfoBuilder_ == null) { + updateRegionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder>( + updateRegionInfo_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + updateRegionInfo_ = null; + } + return updateRegionInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:UpdateFavoredNodesRequest) + } + + static { + defaultInstance = new UpdateFavoredNodesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:UpdateFavoredNodesRequest) + } + + public interface UpdateFavoredNodesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional uint32 response = 1; + boolean hasResponse(); + int getResponse(); + } + public static final class UpdateFavoredNodesResponse extends + com.google.protobuf.GeneratedMessage + implements UpdateFavoredNodesResponseOrBuilder { + // Use UpdateFavoredNodesResponse.newBuilder() to construct. 
+ private UpdateFavoredNodesResponse(Builder builder) { + super(builder); + } + private UpdateFavoredNodesResponse(boolean noInit) {} + + private static final UpdateFavoredNodesResponse defaultInstance; + public static UpdateFavoredNodesResponse getDefaultInstance() { + return defaultInstance; + } + + public UpdateFavoredNodesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesResponse_fieldAccessorTable; + } + + private int bitField0_; + // optional uint32 response = 1; + public static final int RESPONSE_FIELD_NUMBER = 1; + private int response_; + public boolean hasResponse() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getResponse() { + return response_; + } + + private void initFields() { + response_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, response_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, response_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) obj; + + boolean result = true; + result = result && (hasResponse() == other.hasResponse()); + if (hasResponse()) { + result = result && (getResponse() + == other.getResponse()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasResponse()) { + hash = (37 * hash) + RESPONSE_FIELD_NUMBER; + hash = (53 * hash) + getResponse(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponseOrBuilder { + 
public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_UpdateFavoredNodesResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + response_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.response_ = response_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance()) return this; + if (other.hasResponse()) { + setResponse(other.getResponse()); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + response_ = input.readUInt32(); + break; + } + } + } + } + + private int bitField0_; + + // optional uint32 response = 1; + private int response_ ; + public boolean hasResponse() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getResponse() { + return response_; + } + public Builder setResponse(int value) { + bitField0_ |= 0x00000001; + response_ = value; + onChanged(); + return this; + } + public Builder clearResponse() { + bitField0_ = (bitField0_ & ~0x00000001); + response_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:UpdateFavoredNodesResponse) + } + + static { + defaultInstance = new UpdateFavoredNodesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:UpdateFavoredNodesResponse) + } + public interface MergeRegionsRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -14241,6 +15205,11 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request, com.google.protobuf.RpcCallback done); + public abstract void updateFavoredNodes( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -14358,6 +15327,14 @@ public final class AdminProtos { impl.stopServer(controller, request, done); } + @java.lang.Override + public void updateFavoredNodes( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest request, + com.google.protobuf.RpcCallback done) { + impl.updateFavoredNodes(controller, request, done); + } + }; } @@ -14408,6 +15385,8 @@ public final class AdminProtos { return impl.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request); case 13: return impl.stopServer(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request); + case 14: + return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -14450,6 +15429,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); case 13: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); + case 14: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ 
-14492,6 +15473,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); case 13: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); + case 14: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -14570,6 +15553,11 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request, com.google.protobuf.RpcCallback done); + public abstract void updateFavoredNodes( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -14662,6 +15650,11 @@ public final class AdminProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 14: + this.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -14704,6 +15697,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); case 13: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); + case 14: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -14746,6 +15741,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); case 13: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); + case 14: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -14976,6 +15973,21 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance())); } + + public void updateFavoredNodes( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(14), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -15053,6 +16065,11 @@ public final class AdminProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse updateFavoredNodes( 
+ com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -15229,6 +16246,18 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse updateFavoredNodes( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(14), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance()); + } + } } @@ -15318,6 +16347,16 @@ public final class AdminProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_CompactRegionResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_UpdateFavoredNodesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_UpdateFavoredNodesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_UpdateFavoredNodesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_UpdateFavoredNodesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_MergeRegionsRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -15422,45 +16461,50 @@ public final class AdminProtos { "plitRegionResponse\"W\n\024CompactRegionReque" + "st\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\r\n\005" + "major\030\002 \001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025CompactRe" + - "gionResponse\"v\n\023MergeRegionsRequest\022\"\n\010r" + - "egion_a\030\001 \002(\0132\020.RegionSpecifier\022\"\n\010regio" + - "n_b\030\002 \002(\0132\020.RegionSpecifier\022\027\n\010forcible\030" + - "\003 \001(\010:\005false\"\026\n\024MergeRegionsResponse\"X\n\010", - "WALEntry\022\024\n\003key\030\001 \002(\0132\007.WALKey\022\027\n\017key_va" + - "lue_bytes\030\002 \003(\014\022\035\n\025associated_cell_count" + - "\030\003 \001(\005\"4\n\030ReplicateWALEntryRequest\022\030\n\005en" + - "try\030\001 \003(\0132\t.WALEntry\"\033\n\031ReplicateWALEntr" + - "yResponse\"\026\n\024RollWALWriterRequest\"0\n\025Rol" + - "lWALWriterResponse\022\027\n\017region_to_flush\030\001 " + - "\003(\014\"#\n\021StopServerRequest\022\016\n\006reason\030\001 \002(\t" + - "\"\024\n\022StopServerResponse\"\026\n\024GetServerInfoR" + - "equest\"B\n\nServerInfo\022 \n\013server_name\030\001 \002(" + - "\0132\013.ServerName\022\022\n\nwebui_port\030\002 \001(\r\"9\n\025Ge", - "tServerInfoResponse\022 \n\013server_info\030\001 \002(\013" + - "2\013.ServerInfo2\337\006\n\014AdminService\022>\n\rGetReg" + - "ionInfo\022\025.GetRegionInfoRequest\032\026.GetRegi" + - "onInfoResponse\022;\n\014GetStoreFile\022\024.GetStor" + - "eFileRequest\032\025.GetStoreFileResponse\022D\n\017G" + - "etOnlineRegion\022\027.GetOnlineRegionRequest\032" + - "\030.GetOnlineRegionResponse\0225\n\nOpenRegion\022" + - 
"\022.OpenRegionRequest\032\023.OpenRegionResponse" + - "\0228\n\013CloseRegion\022\023.CloseRegionRequest\032\024.C" + - "loseRegionResponse\0228\n\013FlushRegion\022\023.Flus", - "hRegionRequest\032\024.FlushRegionResponse\0228\n\013" + - "SplitRegion\022\023.SplitRegionRequest\032\024.Split" + - "RegionResponse\022>\n\rCompactRegion\022\025.Compac" + - "tRegionRequest\032\026.CompactRegionResponse\022;" + - "\n\014MergeRegions\022\024.MergeRegionsRequest\032\025.M" + - "ergeRegionsResponse\022J\n\021ReplicateWALEntry" + - "\022\031.ReplicateWALEntryRequest\032\032.ReplicateW" + - "ALEntryResponse\022\'\n\006Replay\022\r.MultiRequest" + - "\032\016.MultiResponse\022>\n\rRollWALWriter\022\025.Roll" + - "WALWriterRequest\032\026.RollWALWriterResponse", - "\022>\n\rGetServerInfo\022\025.GetServerInfoRequest" + - "\032\026.GetServerInfoResponse\0225\n\nStopServer\022\022" + - ".StopServerRequest\032\023.StopServerResponseB" + - "A\n*org.apache.hadoop.hbase.protobuf.gene" + - "ratedB\013AdminProtosH\001\210\001\001\240\001\001" + "gionResponse\"X\n\031UpdateFavoredNodesReques" + + "t\022;\n\020updateRegionInfo\030\001 \003(\0132!.OpenRegion" + + "Request.RegionOpenInfo\".\n\032UpdateFavoredN" + + "odesResponse\022\020\n\010response\030\001 \001(\r\"v\n\023MergeR", + "egionsRequest\022\"\n\010region_a\030\001 \002(\0132\020.Region" + + "Specifier\022\"\n\010region_b\030\002 \002(\0132\020.RegionSpec" + + "ifier\022\027\n\010forcible\030\003 \001(\010:\005false\"\026\n\024MergeR" + + "egionsResponse\"X\n\010WALEntry\022\024\n\003key\030\001 \002(\0132" + + "\007.WALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025ass" + + "ociated_cell_count\030\003 \001(\005\"4\n\030ReplicateWAL" + + "EntryRequest\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033" + + "\n\031ReplicateWALEntryResponse\"\026\n\024RollWALWr" + + "iterRequest\"0\n\025RollWALWriterResponse\022\027\n\017" + + "region_to_flush\030\001 \003(\014\"#\n\021StopServerReque", + "st\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerResponse" + + "\"\026\n\024GetServerInfoRequest\"B\n\nServerInfo\022 " + + "\n\013server_name\030\001 \002(\0132\013.ServerName\022\022\n\nwebu" + + "i_port\030\002 \001(\r\"9\n\025GetServerInfoResponse\022 \n" + + "\013server_info\030\001 \002(\0132\013.ServerInfo2\246\007\n\014Admi" + + "nService\022>\n\rGetRegionInfo\022\025.GetRegionInf" + + "oRequest\032\026.GetRegionInfoResponse\022;\n\014GetS" + + "toreFile\022\024.GetStoreFileRequest\032\025.GetStor" + + "eFileResponse\022D\n\017GetOnlineRegion\022\027.GetOn" + + "lineRegionRequest\032\030.GetOnlineRegionRespo", + "nse\0225\n\nOpenRegion\022\022.OpenRegionRequest\032\023." 
+ + "OpenRegionResponse\0228\n\013CloseRegion\022\023.Clos" + + "eRegionRequest\032\024.CloseRegionResponse\0228\n\013" + + "FlushRegion\022\023.FlushRegionRequest\032\024.Flush" + + "RegionResponse\0228\n\013SplitRegion\022\023.SplitReg" + + "ionRequest\032\024.SplitRegionResponse\022>\n\rComp" + + "actRegion\022\025.CompactRegionRequest\032\026.Compa" + + "ctRegionResponse\022;\n\014MergeRegions\022\024.Merge" + + "RegionsRequest\032\025.MergeRegionsResponse\022J\n" + + "\021ReplicateWALEntry\022\031.ReplicateWALEntryRe", + "quest\032\032.ReplicateWALEntryResponse\022\'\n\006Rep" + + "lay\022\r.MultiRequest\032\016.MultiResponse\022>\n\rRo" + + "llWALWriter\022\025.RollWALWriterRequest\032\026.Rol" + + "lWALWriterResponse\022>\n\rGetServerInfo\022\025.Ge" + + "tServerInfoRequest\032\026.GetServerInfoRespon" + + "se\0225\n\nStopServer\022\022.StopServerRequest\032\023.S" + + "topServerResponse\022E\n\022UpdateFavoredNodes\022" + + "\022.OpenRegionRequest\032\033.UpdateFavoredNodes" + + "ResponseBA\n*org.apache.hadoop.hbase.prot" + + "obuf.generatedB\013AdminProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -15603,8 +16647,24 @@ public final class AdminProtos { new java.lang.String[] { }, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.Builder.class); - internal_static_MergeRegionsRequest_descriptor = + internal_static_UpdateFavoredNodesRequest_descriptor = getDescriptor().getMessageTypes().get(16); + internal_static_UpdateFavoredNodesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateFavoredNodesRequest_descriptor, + new java.lang.String[] { "UpdateRegionInfo", }, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.Builder.class); + internal_static_UpdateFavoredNodesResponse_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_UpdateFavoredNodesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateFavoredNodesResponse_descriptor, + new java.lang.String[] { "Response", }, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.Builder.class); + internal_static_MergeRegionsRequest_descriptor = + getDescriptor().getMessageTypes().get(18); internal_static_MergeRegionsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MergeRegionsRequest_descriptor, @@ -15612,7 +16672,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.Builder.class); internal_static_MergeRegionsResponse_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(19); internal_static_MergeRegionsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MergeRegionsResponse_descriptor, @@ -15620,7 +16680,7 @@ public final class AdminProtos { 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.Builder.class); internal_static_WALEntry_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(20); internal_static_WALEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_WALEntry_descriptor, @@ -15628,7 +16688,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder.class); internal_static_ReplicateWALEntryRequest_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(21); internal_static_ReplicateWALEntryRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicateWALEntryRequest_descriptor, @@ -15636,7 +16696,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.Builder.class); internal_static_ReplicateWALEntryResponse_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(22); internal_static_ReplicateWALEntryResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicateWALEntryResponse_descriptor, @@ -15644,7 +16704,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.Builder.class); internal_static_RollWALWriterRequest_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(23); internal_static_RollWALWriterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RollWALWriterRequest_descriptor, @@ -15652,7 +16712,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.Builder.class); internal_static_RollWALWriterResponse_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(24); internal_static_RollWALWriterResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RollWALWriterResponse_descriptor, @@ -15660,7 +16720,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.Builder.class); internal_static_StopServerRequest_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(25); internal_static_StopServerRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StopServerRequest_descriptor, @@ -15668,7 +16728,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.Builder.class); internal_static_StopServerResponse_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(26); 
internal_static_StopServerResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StopServerResponse_descriptor, @@ -15676,7 +16736,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.Builder.class); internal_static_GetServerInfoRequest_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(27); internal_static_GetServerInfoRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetServerInfoRequest_descriptor, @@ -15684,7 +16744,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.Builder.class); internal_static_ServerInfo_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(28); internal_static_ServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ServerInfo_descriptor, @@ -15692,7 +16752,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo.Builder.class); internal_static_GetServerInfoResponse_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(29); internal_static_GetServerInfoResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetServerInfoResponse_descriptor, diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto index 8d0398f..58c159f 100644 --- a/hbase-protocol/src/main/protobuf/Admin.proto +++ b/hbase-protocol/src/main/protobuf/Admin.proto @@ -144,6 +144,14 @@ message CompactRegionRequest { message CompactRegionResponse { } +message UpdateFavoredNodesRequest { + repeated OpenRegionRequest.RegionOpenInfo updateRegionInfo = 1; +} + +message UpdateFavoredNodesResponse { + optional uint32 response = 1; +} + /** * Merges the specified regions. *
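For context, a minimal usage sketch of the new UpdateFavoredNodesRequest/UpdateFavoredNodesResponse messages defined above, using only the generated builder API added in this change. The class name UpdateFavoredNodesExample, the helper names buildRequest/responseValue, and the caller-supplied infos list are illustrative assumptions, not part of the patch; note that the AdminService rpc added in the next hunk takes OpenRegionRequest as its request type while returning UpdateFavoredNodesResponse.

// Minimal usage sketch (illustrative only; names below are not part of the patch).
import java.util.List;

import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;

public class UpdateFavoredNodesExample {
  // Builds the repeated updateRegionInfo field from RegionOpenInfo entries
  // assembled by the caller for the regions whose favored nodes should be updated.
  static UpdateFavoredNodesRequest buildRequest(List<RegionOpenInfo> infos) {
    UpdateFavoredNodesRequest.Builder builder = UpdateFavoredNodesRequest.newBuilder();
    for (RegionOpenInfo info : infos) {
      builder.addUpdateRegionInfo(info);
    }
    return builder.build();
  }

  // Reads the optional uint32 response field from the reply (0 if unset).
  static int responseValue(UpdateFavoredNodesResponse response) {
    return response.hasResponse() ? response.getResponse() : 0;
  }
}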

@@ -250,4 +258,7 @@ service AdminService { rpc StopServer(StopServerRequest) returns(StopServerResponse); + + rpc UpdateFavoredNodes(OpenRegionRequest) + returns(UpdateFavoredNodesResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java new file mode 100644 index 0000000..63ee622 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java @@ -0,0 +1,594 @@ +/** + * Copyright 2012 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import java.text.DecimalFormat; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper; +import org.apache.hadoop.hbase.master.balancer.FavoredNodes; + +@InterfaceAudience.Private +public class AssignmentVerificationReport { + protected static final Log LOG = LogFactory.getLog( + AssignmentVerificationReport.class.getName()); + + private String tableName = null; + private boolean enforceLocality = false; + private boolean isFilledUp = false; + + private int totalRegions = 0; + private int totalRegionServers = 0; + // for unassigned regions + private List unAssignedRegionsList = + new ArrayList(); + + // For regions without valid favored nodes + private List regionsWithoutValidFavoredNodes = + new ArrayList(); + + // For regions not running on the favored nodes + private List nonFavoredAssignedRegionList = + new ArrayList(); + + // For regions running on the favored nodes + private int totalFavoredAssignments = 0; + private int[] favoredNodes = new int[FavoredNodeAssignmentHelper.FAVORED_NODES_NUM]; + private float[] favoredNodesLocalitySummary = + new float[FavoredNodeAssignmentHelper.FAVORED_NODES_NUM]; + private float actualLocalitySummary = 0; + + // For region balancing information + private float avgRegionsOnRS = 0; + private int maxRegionsOnRS = 0; + private int minRegionsOnRS = Integer.MAX_VALUE; + private Set mostLoadedRSSet = + new HashSet(); + private Set leastLoadedRSSet = + new HashSet(); + + private float avgDispersionScore = 0; + private float maxDispersionScore = 0; + private Set maxDispersionScoreServerSet = + new HashSet(); + private float minDispersionScore = Float.MAX_VALUE; + private Set 
minDispersionScoreServerSet = + new HashSet(); + + private float avgDispersionNum = 0; + private float maxDispersionNum = 0; + private Set maxDispersionNumServerSet = + new HashSet(); + private float minDispersionNum = Float.MAX_VALUE; + private Set minDispersionNumServerSet = + new HashSet(); + + public void fillUp(String tableName, SnapshotOfRegionAssignment snapshot, + Map> regionLocalityMap) { + // Set the table name + this.tableName = tableName; + + // Get all the regions for this table + List regionInfoList = + snapshot.getTableToRegionMap().get(tableName); + // Get the total region num for the current table + this.totalRegions = regionInfoList.size(); + + // Get the existing assignment plan + FavoredNodes favoredNodesAssignment = snapshot.getExistingAssignmentPlan(); + // Get the region to region server mapping + Map currentAssignment = + snapshot.getRegionToRegionServerMap(); + // Initialize the server to its hosing region counter map + Map serverToHostingRegionCounterMap = + new HashMap(); + + Map primaryRSToRegionCounterMap = + new HashMap(); + Map> primaryToSecTerRSMap = + new HashMap>(); + + // Check the favored nodes and its locality information + // Also keep tracker of the most loaded and least loaded region servers + for (HRegionInfo region : regionInfoList) { + try { + ServerName currentRS = currentAssignment.get(region); + // Handle unassigned regions + if (currentRS == null) { + unAssignedRegionsList.add(region); + continue; + } + + // Keep updating the server to is hosting region counter map + Integer hostRegionCounter = serverToHostingRegionCounterMap.get(currentRS); + if (hostRegionCounter == null) { + hostRegionCounter = new Integer(0); + } + hostRegionCounter = hostRegionCounter.intValue() + 1; + serverToHostingRegionCounterMap.put(currentRS, hostRegionCounter); + + // Get the favored nodes from the assignment plan and verify it. + List favoredNodes = favoredNodesAssignment.getFavoredNodes(region); + if (favoredNodes == null || + favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + regionsWithoutValidFavoredNodes.add(region); + continue; + } + // Get the primary, secondary and tertiary region server + ServerName primaryRS = + favoredNodes.get(FavoredNodes.Position.PRIMARY.ordinal()); + ServerName secondaryRS = + favoredNodes.get(FavoredNodes.Position.SECONDARY.ordinal()); + ServerName tertiaryRS = + favoredNodes.get(FavoredNodes.Position.TERTIARY.ordinal()); + + // Update the primary rs to its region set map + Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS); + if (regionCounter == null) { + regionCounter = new Integer(0); + } + regionCounter = regionCounter.intValue() + 1; + primaryRSToRegionCounterMap.put(primaryRS, regionCounter); + + // Update the primary rs to secondary and tertiary rs map + Set secAndTerSet = primaryToSecTerRSMap.get(primaryRS); + if (secAndTerSet == null) { + secAndTerSet = new HashSet(); + } + secAndTerSet.add(secondaryRS); + secAndTerSet.add(tertiaryRS); + primaryToSecTerRSMap.put(primaryRS, secAndTerSet); + + // Get the position of the current region server in the favored nodes list + FavoredNodes.Position favoredNodePosition = + FavoredNodes.getFavoredServerPosition(favoredNodes, currentRS); + + // Handle the non favored assignment. + if (favoredNodePosition == null) { + nonFavoredAssignedRegionList.add(region); + continue; + } + // Increase the favored nodes assignment. 
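The loop above classifies each hosted region by where its current server sits in the three-element favored list, and everything else is counted as a non-favored assignment. A simplified stand-in for that lookup, using plain host strings instead of FavoredNodes.getFavoredServerPosition (the helper name is made up):

import java.util.List;

// Simplified position lookup: 0/1/2 for primary/secondary/tertiary,
// -1 when the region is hosted off its favored nodes.
final class FavoredPositionSketch {
  static int positionOf(List<String> favoredHosts, String currentHost) {
    for (int i = 0; i < favoredHosts.size(); i++) {
      if (favoredHosts.get(i).equals(currentHost)) {
        return i;
      }
    }
    return -1;
  }
}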
+ this.favoredNodes[favoredNodePosition.ordinal()]++; + totalFavoredAssignments++; + + // Summary the locality information for each favored nodes + if (regionLocalityMap != null) { + // Set the enforce locality as true; + this.enforceLocality = true; + + // Get the region degree locality map + Map regionDegreeLocalityMap = + regionLocalityMap.get(region.getEncodedName()); + if (regionDegreeLocalityMap == null) { + continue; // ignore the region which doesn't have any store files. + } + + // Get the locality summary for each favored nodes + for (FavoredNodes.Position p : FavoredNodes.Position.values()) { + ServerName favoredNode = favoredNodes.get(p.ordinal()); + // Get the locality for the current favored nodes + Float locality = + regionDegreeLocalityMap.get(favoredNode.getHostname()); + if (locality != null) { + this.favoredNodesLocalitySummary[p.ordinal()] += locality; + } + } + + // Get the locality summary for the current region server + Float actualLocality = + regionDegreeLocalityMap.get(currentRS.getHostname()); + if (actualLocality != null) { + this.actualLocalitySummary += actualLocality; + } + } + } catch (Exception e) { + LOG.error("Cannot verify the region assignment for region " + + ((region == null) ? " null " : region.getRegionNameAsString()) + + "because of " + e); + } + } + + float dispersionScoreSummary = 0; + float dispersionNumSummary = 0; + // Calculate the secondary score for each primary region server + for (Map.Entry entry : + primaryRSToRegionCounterMap.entrySet()) { + ServerName primaryRS = entry.getKey(); + Integer regionsOnPrimary = entry.getValue(); + + // Process the dispersion number and score + float dispersionScore = 0; + int dispersionNum = 0; + if (primaryToSecTerRSMap.get(primaryRS) != null + && regionsOnPrimary.intValue() != 0) { + dispersionNum = primaryToSecTerRSMap.get(primaryRS).size(); + dispersionScore = dispersionNum / + ((float) regionsOnPrimary.intValue() * 2); + } + // Update the max dispersion score + if (dispersionScore > this.maxDispersionScore) { + this.maxDispersionScoreServerSet.clear(); + this.maxDispersionScoreServerSet.add(primaryRS); + this.maxDispersionScore = dispersionScore; + } else if (dispersionScore == this.maxDispersionScore) { + this.maxDispersionScoreServerSet.add(primaryRS); + } + + // Update the max dispersion num + if (dispersionNum > this.maxDispersionNum) { + this.maxDispersionNumServerSet.clear(); + this.maxDispersionNumServerSet.add(primaryRS); + this.maxDispersionNum = dispersionNum; + } else if (dispersionNum == this.maxDispersionNum) { + this.maxDispersionNumServerSet.add(primaryRS); + } + + // Update the min dispersion score + if (dispersionScore < this.minDispersionScore) { + this.minDispersionScoreServerSet.clear(); + this.minDispersionScoreServerSet.add(primaryRS); + this.minDispersionScore = dispersionScore; + } else if (dispersionScore == this.minDispersionScore) { + this.minDispersionScoreServerSet.add(primaryRS); + } + + // Update the min dispersion num + if (dispersionNum < this.minDispersionNum) { + this.minDispersionNumServerSet.clear(); + this.minDispersionNumServerSet.add(primaryRS); + this.minDispersionNum = dispersionNum; + } else if (dispersionNum == this.minDispersionNum) { + this.minDispersionNumServerSet.add(primaryRS); + } + + dispersionScoreSummary += dispersionScore; + dispersionNumSummary += dispersionNum; + } + + // Update the avg dispersion score + if (primaryRSToRegionCounterMap.keySet().size() != 0) { + this.avgDispersionScore = dispersionScoreSummary / + (float) 
primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionNum = dispersionNumSummary / + (float) primaryRSToRegionCounterMap.keySet().size(); + } + + // Fill up the most loaded and least loaded region server information + for (Map.Entry entry : + serverToHostingRegionCounterMap.entrySet()) { + ServerName currentRS = entry.getKey(); + int hostRegionCounter = entry.getValue().intValue(); + + // Update the most loaded region server list and maxRegionsOnRS + if (hostRegionCounter > this.maxRegionsOnRS) { + maxRegionsOnRS = hostRegionCounter; + this.mostLoadedRSSet.clear(); + this.mostLoadedRSSet.add(currentRS); + } else if (hostRegionCounter == this.maxRegionsOnRS) { + this.mostLoadedRSSet.add(currentRS); + } + + // Update the least loaded region server list and minRegionsOnRS + if (hostRegionCounter < this.minRegionsOnRS) { + this.minRegionsOnRS = hostRegionCounter; + this.leastLoadedRSSet.clear(); + this.leastLoadedRSSet.add(currentRS); + } else if (hostRegionCounter == this.minRegionsOnRS) { + this.leastLoadedRSSet.add(currentRS); + } + } + + // and total region servers + this.totalRegionServers = serverToHostingRegionCounterMap.keySet().size(); + this.avgRegionsOnRS = (totalRegionServers == 0) ? 0 : + (totalRegions / (float) totalRegionServers); + // Set the isFilledUp as true + isFilledUp = true; + } + + /** + * Use this to project the dispersion scores + * @param tableName + * @param snapshot + * @param newPlan + */ + public void fillUpDispersion(String tableName, + SnapshotOfRegionAssignment snapshot, FavoredNodes newPlan) { + // Set the table name + this.tableName = tableName; + // Get all the regions for this table + List regionInfoList = snapshot.getTableToRegionMap().get( + tableName); + // Get the total region num for the current table + this.totalRegions = regionInfoList.size(); + FavoredNodes plan = null; + if (newPlan == null) { + plan = snapshot.getExistingAssignmentPlan(); + } else { + plan = newPlan; + } + // Get the region to region server mapping + Map primaryRSToRegionCounterMap = + new HashMap(); + Map> primaryToSecTerRSMap = + new HashMap>(); + + // Check the favored nodes and its locality information + // Also keep tracker of the most loaded and least loaded region servers + for (HRegionInfo region : regionInfoList) { + try { + // Get the favored nodes from the assignment plan and verify it. 
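The dispersion score computed above is the number of distinct secondary/tertiary hosts a primary spreads its regions over, divided by twice the number of regions for which that server is primary (two favored slots per region). A toy calculation with made-up host names:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class DispersionScoreSketch {
  public static void main(String[] args) {
    // One server is primary for 3 regions; their secondaries/tertiaries
    // land on 4 distinct hosts.
    int regionsOnPrimary = 3;
    Set<String> secondaryAndTertiaryHosts =
        new HashSet<String>(Arrays.asList("rs1", "rs2", "rs3", "rs4"));
    int dispersionNum = secondaryAndTertiaryHosts.size();
    float dispersionScore = dispersionNum / ((float) regionsOnPrimary * 2);
    // 4 distinct hosts out of 2 * 3 = 6 slots -> ~0.67; a score of 1.0 means
    // every secondary/tertiary slot went to a different host.
    System.out.println(dispersionNum + " hosts, score " + dispersionScore);
  }
}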
+ List favoredNodes = plan.getFavoredNodes(region); + if (favoredNodes == null + || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + regionsWithoutValidFavoredNodes.add(region); + continue; + } + // Get the primary, secondary and tertiary region server + ServerName primaryRS = favoredNodes + .get(FavoredNodes.Position.PRIMARY.ordinal()); + ServerName secondaryRS = favoredNodes + .get(FavoredNodes.Position.SECONDARY.ordinal()); + ServerName tertiaryRS = favoredNodes + .get(FavoredNodes.Position.TERTIARY.ordinal()); + + // Update the primary rs to its region set map + Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS); + if (regionCounter == null) { + regionCounter = new Integer(0); + } + regionCounter = regionCounter.intValue() + 1; + primaryRSToRegionCounterMap.put(primaryRS, regionCounter); + + // Update the primary rs to secondary and tertiary rs map + Set secAndTerSet = primaryToSecTerRSMap.get(primaryRS); + if (secAndTerSet == null) { + secAndTerSet = new HashSet(); + } + secAndTerSet.add(secondaryRS); + secAndTerSet.add(tertiaryRS); + primaryToSecTerRSMap.put(primaryRS, secAndTerSet); + } catch (Exception e) { + LOG.error("Cannot verify the region assignment for region " + + ((region == null) ? " null " : region.getRegionNameAsString()) + + "because of " + e); + } + } + float dispersionScoreSummary = 0; + float dispersionNumSummary = 0; + // Calculate the secondary score for each primary region server + for (Map.Entry entry : + primaryRSToRegionCounterMap.entrySet()) { + ServerName primaryRS = entry.getKey(); + Integer regionsOnPrimary = entry.getValue(); + + // Process the dispersion number and score + float dispersionScore = 0; + int dispersionNum = 0; + if (primaryToSecTerRSMap.get(primaryRS) != null + && regionsOnPrimary.intValue() != 0) { + dispersionNum = primaryToSecTerRSMap.get(primaryRS).size(); + dispersionScore = dispersionNum / + ((float) regionsOnPrimary.intValue() * 2); + } + + // Update the max dispersion num + if (dispersionNum > this.maxDispersionNum) { + this.maxDispersionNumServerSet.clear(); + this.maxDispersionNumServerSet.add(primaryRS); + this.maxDispersionNum = dispersionNum; + } else if (dispersionNum == this.maxDispersionNum) { + this.maxDispersionNumServerSet.add(primaryRS); + } + + // Update the min dispersion score + if (dispersionScore < this.minDispersionScore) { + this.minDispersionScoreServerSet.clear(); + this.minDispersionScoreServerSet.add(primaryRS); + this.minDispersionScore = dispersionScore; + } else if (dispersionScore == this.minDispersionScore) { + this.minDispersionScoreServerSet.add(primaryRS); + } + + // Update the min dispersion num + if (dispersionNum < this.minDispersionNum) { + this.minDispersionNumServerSet.clear(); + this.minDispersionNumServerSet.add(primaryRS); + this.minDispersionNum = dispersionNum; + } else if (dispersionNum == this.minDispersionNum) { + this.minDispersionNumServerSet.add(primaryRS); + } + + dispersionScoreSummary += dispersionScore; + dispersionNumSummary += dispersionNum; + } + + // Update the avg dispersion score + if (primaryRSToRegionCounterMap.keySet().size() != 0) { + this.avgDispersionScore = dispersionScoreSummary / + (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionNum = dispersionNumSummary / + (float) primaryRSToRegionCounterMap.keySet().size(); + } + } + + /** + * @return list which contains just 3 elements: average dispersion score, max + * dispersion score and min dispersion score as first, second and third element + * respectively. 
+ * + */ + public List getDispersionInformation() { + List dispersion = new ArrayList(); + dispersion.add(avgDispersionScore); + dispersion.add(maxDispersionScore); + dispersion.add(minDispersionScore); + return dispersion; + } + + public void print(boolean isDetailMode) { + if (!isFilledUp) { + System.err.println("[Error] Region assignment verification report " + + "hasn't been filled up"); + } + DecimalFormat df = new java.text.DecimalFormat( "#.##"); + + // Print some basic information + System.out.println("Region Assignment Verification for Table: " + tableName + + "\n\tTotal regions : " + totalRegions); + + // Print the number of regions on each kind of favored node + System.out.println("\tTotal regions on favored nodes " + + totalFavoredAssignments); + for (FavoredNodes.Position p : FavoredNodes.Position.values()) { + System.out.println("\t\tTotal regions on "+ p.toString() + + " region servers: " + favoredNodes[p.ordinal()]); + } + // Print the number of regions in each kind of invalid assignment + System.out.println("\tTotal unassigned regions: " + + unAssignedRegionsList.size()); + if (isDetailMode) { + for (HRegionInfo region : unAssignedRegionsList) { + System.out.println("\t\t" + region.getRegionNameAsString()); + } + } + + System.out.println("\tTotal regions NOT on favored nodes: " + + nonFavoredAssignedRegionList.size()); + if (isDetailMode) { + for (HRegionInfo region : nonFavoredAssignedRegionList) { + System.out.println("\t\t" + region.getRegionNameAsString()); + } + } + + System.out.println("\tTotal regions without favored nodes: " + + regionsWithoutValidFavoredNodes.size()); + if (isDetailMode) { + for (HRegionInfo region : regionsWithoutValidFavoredNodes) { + System.out.println("\t\t" + region.getRegionNameAsString()); + } + } + + // Print the locality information if enabled + if (this.enforceLocality && totalRegions != 0) { + // Print the actual locality for this table + float actualLocality = 100 * + this.actualLocalitySummary / (float) totalRegions; + System.out.println("\n\tThe actual avg locality is " + + df.format(actualLocality) + " %"); + + // Print the expected locality if regions are placed on each kind of + // favored node + for (FavoredNodes.Position p : FavoredNodes.Position.values()) { + float avgLocality = 100 * + (favoredNodesLocalitySummary[p.ordinal()] / (float) totalRegions); + System.out.println("\t\tThe expected avg locality if all regions" + + " on the " + p.toString() + " region servers: " + + df.format(avgLocality) + " %"); + } + } + + // Print the region balancing information + System.out.println("\n\tTotal hosting region servers: " + + totalRegionServers); + // Print the region balance information + if (totalRegionServers != 0) { + System.out.println( + "\tAvg dispersion num: " +df.format(avgDispersionNum) + + " hosts;\tMax dispersion num: " + df.format(maxDispersionNum) + + " hosts;\tMin dispersion num: " + df.format(minDispersionNum) + + " hosts;"); + + System.out.println("\t\tThe number of the region servers with the max" + + " dispersion num: " + this.maxDispersionNumServerSet.size()); + if (isDetailMode) { + printHServerAddressSet(maxDispersionNumServerSet); + } + + System.out.println("\t\tThe number of the region servers with the min" + + " dispersion num: " + this.minDispersionNumServerSet.size()); + if (isDetailMode) { + printHServerAddressSet(minDispersionNumServerSet); + } + + System.out.println( + "\tAvg dispersion score: " + df.format(avgDispersionScore) + + ";\tMax dispersion score: " + df.format(maxDispersionScore) + +
";\tMin dispersion score: " + df.format(minDispersionScore) + ";"); + + System.out.println("\t\tThe number of the region servers with the max" + + " dispersion score: " + this.maxDispersionScoreServerSet.size()); + if (isDetailMode) { + printHServerAddressSet(maxDispersionScoreServerSet); + } + + System.out.println("\t\tThe number of the region servers with the min" + + " dispersion score: " + this.minDispersionScoreServerSet.size()); + if (isDetailMode) { + printHServerAddressSet(minDispersionScoreServerSet); + } + + System.out.println( + "\tAvg regions/region server: " + df.format(avgRegionsOnRS) + + ";\tMax regions/region server: " + maxRegionsOnRS + + ";\tMin regions/region server: " + minRegionsOnRS + ";"); + + // Print the details about the most loaded region servers + System.out.println("\t\tThe number of the most loaded region servers: " + + mostLoadedRSSet.size()); + if (isDetailMode) { + printHServerAddressSet(mostLoadedRSSet); + } + + // Print the details about the least loaded region servers + System.out.println("\t\tThe number of the least loaded region servers: " + + leastLoadedRSSet.size()); + if (isDetailMode) { + printHServerAddressSet(leastLoadedRSSet); + } + } + System.out.println("=============================="); + } + + private void printHServerAddressSet(Set serverSet) { + if (serverSet == null) { + return ; + } + int i = 0; + for (ServerName addr : serverSet){ + if ((i++) % 3 == 0) { + System.out.print("\n\t\t\t"); + } + System.out.print(addr.getHostAndPort() + " ; "); + } + System.out.println("\n"); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java new file mode 100644 index 0000000..7ae65b5 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java @@ -0,0 +1,1107 @@ +package org.apache.hadoop.hbase.master; + +import java.io.IOException; +import java.text.DecimalFormat; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Scanner; +import java.util.Set; +import java.util.TreeMap; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.GnuParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper; +import org.apache.hadoop.hbase.master.balancer.FavoredNodes; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; +import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.MunkresAssignment; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Triple; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +@InterfaceAudience.Private +class RegionPlacementMaintainer { + private static final Log LOG = LogFactory.getLog(RegionPlacementMaintainer.class + .getName()); + //The cost of a placement that should never be assigned. + private static final float MAX_COST = Float.POSITIVE_INFINITY; + + // The cost of a placement that is undesirable but acceptable. + private static final float AVOID_COST = 100000f; + + // The amount by which the cost of a placement is increased if it is the + // last slot of the server. This is done to more evenly distribute the slop + // amongst servers. + private static final float LAST_SLOT_COST_PENALTY = 0.5f; + + // The amount by which the cost of a primary placement is penalized if it is + // not the host currently serving the region. This is done to minimize moves. + private static final float NOT_CURRENT_HOST_PENALTY = 0.1f; + + private static boolean USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = false; + + private Configuration conf; + private final boolean enforceLocality; + private final boolean enforceMinAssignmentMove; + private HBaseAdmin admin; + private RackManager rackManager; + private Set targetTableSet; + + public RegionPlacementMaintainer(Configuration conf, boolean enforceLocality, + boolean enforceMinAssignmentMove) { + this.conf = conf; + this.enforceLocality = enforceLocality; + this.enforceMinAssignmentMove = enforceMinAssignmentMove; + this.targetTableSet = new HashSet(); + this.rackManager = new RackManager(conf); + } + private static void printHelp(Options opt) { + new HelpFormatter().printHelp( + "RegionPlacement < -w | -u | -n | -v | -t | -h | -overwrite -r regionName -f favoredNodes " + + "-diff>" + + " [-l false] [-m false] [-d] [-tables t1,t2,...tn] [-zk zk1,zk2,zk3]" + + " [-fs hdfs://a.b.c.d:9000] [-hbase_root /HBASE]", opt); + } + + public void setTargetTableName(String[] tableNames) { + if (tableNames != null) { + for (String table : tableNames) + this.targetTableSet.add(table); + } + } + + /** + * @return the cached HBaseAdmin + * @throws IOException + */ + private HBaseAdmin getHBaseAdmin() throws IOException { + if (this.admin == null) { + this.admin = new HBaseAdmin(this.conf); + } + return this.admin; + } + + /** + * @return the new RegionAssignmentSnapshot + * @throws IOException + */ + public SnapshotOfRegionAssignment getRegionAssignmentSnapshot() + throws IOException { + SnapshotOfRegionAssignment currentAssignmentShapshot = + new SnapshotOfRegionAssignment(this.conf); + currentAssignmentShapshot.initialize(); + return currentAssignmentShapshot; + } + + /** + * Verify the region placement is consistent with the assignment plan; + * @throws IOException + */ + public void verifyRegionPlacement(boolean isDetailMode) throws IOException { + System.out.println("Start to verify the region assignment and " + + "generate the verification report"); + // Get the region assignment snapshot + SnapshotOfRegionAssignment snapshot = this.getRegionAssignmentSnapshot(); + + // Get all the tables + Set tables = snapshot.getTableSet(); + + // Get the region locality map + Map> regionLocalityMap = null; + if 
(this.enforceLocality == true) { + regionLocalityMap = FSUtils.getRegionDegreeLocalityMappingFromFS(conf); + } + // Iterate all the tables to fill up the verification report + for (String table : tables) { + if (!this.targetTableSet.isEmpty() && + !this.targetTableSet.contains(table)) { + continue; + } + AssignmentVerificationReport report = new AssignmentVerificationReport(); + report.fillUp(table, snapshot, regionLocalityMap); + report.print(isDetailMode); + } + } + + /** + * Generate the assignment plan for the existing table + * + * @param tableName + * @param assignmentSnapshot + * @param regionLocalityMap + * @param plan + * @param munkresForSecondaryAndTertiary if set on true the assignment plan + * for the tertiary and secondary will be generated with Munkres algorithm, + * otherwise will be generated using placeSecondaryAndTertiaryRS + * @throws IOException + */ + private void genAssignmentPlan(String tableName, + SnapshotOfRegionAssignment assignmentSnapshot, + Map> regionLocalityMap, FavoredNodes plan, + boolean munkresForSecondaryAndTertiary) throws IOException { + // Get the all the regions for the current table + List regions = + assignmentSnapshot.getTableToRegionMap().get(tableName); + int numRegions = regions.size(); + + // Get the current assignment map + Map currentAssignmentMap = + assignmentSnapshot.getRegionToRegionServerMap(); + + // Get the all the region servers + List servers = new ArrayList(); + servers.addAll(admin.getClusterStatus().getServers()); + + LOG.info("Start to generate assignment plan for " + numRegions + + " regions from table " + tableName + " with " + + servers.size() + " region servers"); + + int slotsPerServer = (int) Math.ceil((float) numRegions / + servers.size()); + int regionSlots = slotsPerServer * servers.size(); + + // Compute the primary, secondary and tertiary costs for each region/server + // pair. These costs are based only on node locality and rack locality, and + // will be modified later. + float[][] primaryCost = new float[numRegions][regionSlots]; + float[][] secondaryCost = new float[numRegions][regionSlots]; + float[][] tertiaryCost = new float[numRegions][regionSlots]; + + if (this.enforceLocality && regionLocalityMap != null) { + // Transform the locality mapping into a 2D array, assuming that any + // unspecified locality value is 0. + float[][] localityPerServer = new float[numRegions][regionSlots]; + for (int i = 0; i < numRegions; i++) { + Map serverLocalityMap = + regionLocalityMap.get(regions.get(i).getEncodedName()); + if (serverLocalityMap == null) { + continue; + } + for (int j = 0; j < servers.size(); j++) { + String serverName = servers.get(j).getHostname(); + if (serverName == null) { + continue; + } + Float locality = serverLocalityMap.get(serverName); + if (locality == null) { + continue; + } + for (int k = 0; k < slotsPerServer; k++) { + // If we can't find the locality of a region to a server, which occurs + // because locality is only reported for servers which have some + // blocks of a region local, then the locality for that pair is 0. + localityPerServer[i][j * slotsPerServer + k] = locality.floatValue(); + } + } + } + + // Compute the total rack locality for each region in each rack. The total + // rack locality is the sum of the localities of a region on all servers in + // a rack. 
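The total rack locality described in that comment is a per-rack sum of the per-server locality values for one region. A sketch of that aggregation, assuming a plain host-to-rack map (the patch resolves racks through RackManager instead; the class and method names here are made up):

import java.util.HashMap;
import java.util.Map;

public class RackLocalitySketch {
  // Sum one region's per-host locality into per-rack totals.
  static Map<String, Float> totalRackLocality(Map<String, Float> localityPerHost,
                                              Map<String, String> hostToRack) {
    Map<String, Float> perRack = new HashMap<String, Float>();
    for (Map.Entry<String, Float> e : localityPerHost.entrySet()) {
      String rack = hostToRack.get(e.getKey());
      Float sum = perRack.get(rack);
      perRack.put(rack, (sum == null ? 0f : sum.floatValue()) + e.getValue().floatValue());
    }
    return perRack;
  }
}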
+ Map> rackRegionLocality = + new HashMap>(); + for (int i = 0; i < numRegions; i++) { + HRegionInfo region = regions.get(i); + for (int j = 0; j < regionSlots; j += slotsPerServer) { + String rack = rackManager.getRack(servers.get(j / slotsPerServer)); + Map rackLocality = rackRegionLocality.get(rack); + if (rackLocality == null) { + rackLocality = new HashMap(); + rackRegionLocality.put(rack, rackLocality); + } + Float localityObj = rackLocality.get(region); + float locality = localityObj == null ? 0 : localityObj.floatValue(); + locality += localityPerServer[i][j]; + rackLocality.put(region, locality); + } + } + for (int i = 0; i < numRegions; i++) { + for (int j = 0; j < regionSlots; j++) { + String rack = rackManager.getRack(servers.get(j / slotsPerServer)); + Float totalRackLocalityObj = + rackRegionLocality.get(rack).get(regions.get(i)); + float totalRackLocality = totalRackLocalityObj == null ? + 0 : totalRackLocalityObj.floatValue(); + + // Primary cost aims to favor servers with high node locality and low + // rack locality, so that secondaries and tertiaries can be chosen for + // nodes with high rack locality. This might give primaries with + // slightly less locality at first compared to a cost which only + // considers the node locality, but should be better in the long run. + primaryCost[i][j] = 1 - (2 * localityPerServer[i][j] - + totalRackLocality); + + // Secondary cost aims to favor servers with high node locality and high + // rack locality since the tertiary will be chosen from the same rack as + // the secondary. This could be negative, but that is okay. + secondaryCost[i][j] = 2 - (localityPerServer[i][j] + totalRackLocality); + + // Tertiary cost is only concerned with the node locality. It will later + // be restricted to only hosts on the same rack as the secondary. + tertiaryCost[i][j] = 1 - localityPerServer[i][j]; + } + } + } + + if (this.enforceMinAssignmentMove && currentAssignmentMap != null) { + // We want to minimize the number of regions which move as the result of a + // new assignment. Therefore, slightly penalize any placement which is for + // a host that is not currently serving the region. + for (int i = 0; i < numRegions; i++) { + for (int j = 0; j < servers.size(); j++) { + ServerName currentAddress = currentAssignmentMap.get(regions.get(i)); + if (currentAddress != null && + !currentAddress.equals(servers.get(j))) { + for (int k = 0; k < slotsPerServer; k++) { + primaryCost[i][j * slotsPerServer + k] += NOT_CURRENT_HOST_PENALTY; + } + } + } + } + } + + // Artificially increase cost of last slot of each server to evenly + // distribute the slop, otherwise there will be a few servers with too few + // regions and many servers with the max number of regions. + for (int i = 0; i < numRegions; i++) { + for (int j = 0; j < regionSlots; j += slotsPerServer) { + primaryCost[i][j] += LAST_SLOT_COST_PENALTY; + secondaryCost[i][j] += LAST_SLOT_COST_PENALTY; + tertiaryCost[i][j] += LAST_SLOT_COST_PENALTY; + } + } + + RandomizedMatrix randomizedMatrix = new RandomizedMatrix(numRegions, + regionSlots); + primaryCost = randomizedMatrix.transform(primaryCost); + int[] primaryAssignment = new MunkresAssignment(primaryCost).solve(); + primaryAssignment = randomizedMatrix.invertIndices(primaryAssignment); + + // Modify the secondary and tertiary costs for each region/server pair to + // prevent a region from being assigned to the same rack for both primary + // and either one of secondary or tertiary. 
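Before the masking step that the last comment introduces, the three locality-based cost formulas above are easier to read with numbers plugged in; the values below are made up:

public class PlacementCostSketch {
  public static void main(String[] args) {
    // A slot whose server has 0.8 node locality for the region, on a rack
    // whose servers together hold 1.5 total locality for that region.
    float nodeLocality = 0.8f;
    float rackLocality = 1.5f;
    float primaryCost   = 1 - (2 * nodeLocality - rackLocality); // 1 - (1.6 - 1.5) = 0.9
    float secondaryCost = 2 - (nodeLocality + rackLocality);     // 2 - 2.3 = -0.3 (negative is acceptable)
    float tertiaryCost  = 1 - nodeLocality;                      // 0.2
    System.out.println(primaryCost + " " + secondaryCost + " " + tertiaryCost);
  }
}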
+ for (int i = 0; i < numRegions; i++) { + int slot = primaryAssignment[i]; + String rack = rackManager.getRack(servers.get(slot / slotsPerServer)); + for (int k = 0; k < servers.size(); k++) { + if (!rackManager.getRack(servers.get(k)).equals(rack)) { + continue; + } + if (k == slot / slotsPerServer) { + // Same node, do not place secondary or tertiary here ever. + for (int m = 0; m < slotsPerServer; m++) { + secondaryCost[i][k * slotsPerServer + m] = MAX_COST; + tertiaryCost[i][k * slotsPerServer + m] = MAX_COST; + } + } else { + // Same rack, do not place secondary or tertiary here if possible. + for (int m = 0; m < slotsPerServer; m++) { + secondaryCost[i][k * slotsPerServer + m] = AVOID_COST; + tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST; + } + } + } + } + if (munkresForSecondaryAndTertiary) { + randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots); + secondaryCost = randomizedMatrix.transform(secondaryCost); + int[] secondaryAssignment = new MunkresAssignment(secondaryCost).solve(); + secondaryAssignment = randomizedMatrix.invertIndices(secondaryAssignment); + + // Modify the tertiary costs for each region/server pair to ensure that a + // region is assigned to a tertiary server on the same rack as its secondary + // server, but not the same server in that rack. + for (int i = 0; i < numRegions; i++) { + int slot = secondaryAssignment[i]; + String rack = rackManager.getRack(servers.get(slot / slotsPerServer)); + for (int k = 0; k < servers.size(); k++) { + if (k == slot / slotsPerServer) { + // Same node, do not place tertiary here ever. + for (int m = 0; m < slotsPerServer; m++) { + tertiaryCost[i][k * slotsPerServer + m] = MAX_COST; + } + } else { + if (rackManager.getRack(servers.get(k)).equals(rack)) { + continue; + } + // Different rack, do not place tertiary here if possible. 
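Each of the three placement rounds in this method follows the same pattern: shuffle the cost matrix, solve it with MunkresAssignment, then map the result back to the original slot order. A condensed sketch of one round (class name hypothetical; it assumes the same package as this file, since the RandomizedMatrix helper further down is protected):

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.util.MunkresAssignment;

// One assignment round: randomize the cost matrix, solve it with the Munkres
// algorithm, then map the chosen slots back to the original column order.
class AssignmentRoundSketch {
  static int[] solveRound(float[][] cost) {
    RegionPlacementMaintainer.RandomizedMatrix shuffle =
        new RegionPlacementMaintainer.RandomizedMatrix(cost.length, cost[0].length);
    int[] shuffled = new MunkresAssignment(shuffle.transform(cost)).solve();
    return shuffle.invertIndices(shuffled); // result[i] = slot picked for region i
  }
}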
+ for (int m = 0; m < slotsPerServer; m++) { + tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST; + } + } + } + } + + randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots); + tertiaryCost = randomizedMatrix.transform(tertiaryCost); + int[] tertiaryAssignment = new MunkresAssignment(tertiaryCost).solve(); + tertiaryAssignment = randomizedMatrix.invertIndices(tertiaryAssignment); + + for (int i = 0; i < numRegions; i++) { + List favoredServers = + new ArrayList(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); + ServerName s = servers.get(primaryAssignment[i] / slotsPerServer); + favoredServers.add(new ServerName(s.getHostname(), s.getPort(), + ServerName.NON_STARTCODE)); + + s = servers.get(secondaryAssignment[i] / slotsPerServer); + favoredServers.add(new ServerName(s.getHostname(), s.getPort(), + ServerName.NON_STARTCODE)); + + s = servers.get(tertiaryAssignment[i] / slotsPerServer); + favoredServers.add(new ServerName(s.getHostname(), s.getPort(), + ServerName.NON_STARTCODE)); + // Update the assignment plan + plan.updateAssignmentPlan(regions.get(i), favoredServers); + } + LOG.info("Generated the assignment plan for " + numRegions + + " regions from table " + tableName + " with " + + servers.size() + " region servers"); + LOG.info("Assignment plan for secondary and tertiary generated " + + "using MunkresAssignment"); + } else { + Map primaryRSMap = new HashMap(); + for (int i = 0; i < numRegions; i++) { + primaryRSMap.put(regions.get(i), servers.get(primaryAssignment[i] / slotsPerServer)); + } + FavoredNodeAssignmentHelper favoredNodeHelper = + new FavoredNodeAssignmentHelper(servers, conf); + Map> secondaryAndTertiaryMap = + favoredNodeHelper.placeSecondaryAndTertiaryWithRestrictions(primaryRSMap); + for (int i = 0; i < numRegions; i++) { + List favoredServers = + new ArrayList(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); + HRegionInfo currentRegion = regions.get(i); + ServerName s = primaryRSMap.get(currentRegion); + favoredServers.add(new ServerName(s.getHostname(), s.getPort(), + ServerName.NON_STARTCODE)); + + Pair secondaryAndTertiary = + secondaryAndTertiaryMap.get(currentRegion); + s = secondaryAndTertiary.getFirst(); + favoredServers.add(new ServerName(s.getHostname(), s.getPort(), + ServerName.NON_STARTCODE)); + + s = secondaryAndTertiary.getSecond(); + favoredServers.add(new ServerName(s.getHostname(), s.getPort(), + ServerName.NON_STARTCODE)); + // Update the assignment plan + plan.updateAssignmentPlan(regions.get(i), favoredServers); + } + LOG.info("Generated the assignment plan for " + numRegions + + " regions from table " + tableName + " with " + + servers.size() + " region servers"); + LOG.info("Assignment plan for secondary and tertiary generated " + + "using placeSecondaryAndTertiaryWithRestrictions method"); + } + } + + public FavoredNodes getNewAssignmentPlan() throws IOException { + // Get the current region assignment snapshot by scanning from the META + SnapshotOfRegionAssignment assignmentSnapshot = + this.getRegionAssignmentSnapshot(); + + // Get the region locality map + Map> regionLocalityMap = null; + if (this.enforceLocality) { + regionLocalityMap = FSUtils.getRegionDegreeLocalityMappingFromFS(conf); + } + // Initialize the assignment plan + FavoredNodes plan = new FavoredNodes(); + + // Get the table to region mapping + Map> tableToRegionMap = + assignmentSnapshot.getTableToRegionMap(); + LOG.info("Start to generate the new assignment plan for the " + + + tableToRegionMap.keySet().size() + " tables" ); + for (String table : 
tableToRegionMap.keySet()) { + try { + if (!this.targetTableSet.isEmpty() && + !this.targetTableSet.contains(table)) { + continue; + } + // TODO: maybe run the placement in parallel for each table + genAssignmentPlan(table, assignmentSnapshot, regionLocalityMap, plan, + USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY); + } catch (Exception e) { + LOG.error("Get some exceptions for placing primary region server" + + "for table " + table + " because " + e); + } + } + LOG.info("Finish to generate the new assignment plan for the " + + + tableToRegionMap.keySet().size() + " tables" ); + return plan; + } + + /** + * Some algorithms for solving the assignment problem may traverse workers or + * jobs in linear order which may result in skewing the assignments of the + * first jobs in the matrix toward the last workers in the matrix if the + * costs are uniform. To avoid this kind of clumping, we can randomize the + * rows and columns of the cost matrix in a reversible way, such that the + * solution to the assignment problem can be interpreted in terms of the + * original untransformed cost matrix. Rows and columns are transformed + * independently such that the elements contained in any row of the input + * matrix are the same as the elements in the corresponding output matrix, + * and each row has its elements transformed in the same way. Similarly for + * columns. + */ + protected static class RandomizedMatrix { + private final int rows; + private final int cols; + private final int[] rowTransform; + private final int[] rowInverse; + private final int[] colTransform; + private final int[] colInverse; + + /** + * Create a randomization scheme for a matrix of a given size. + * @param rows the number of rows in the matrix + * @param cols the number of columns in the matrix + */ + public RandomizedMatrix(int rows, int cols) { + this.rows = rows; + this.cols = cols; + Random random = new Random(); + rowTransform = new int[rows]; + rowInverse = new int[rows]; + for (int i = 0; i < rows; i++) { + rowTransform[i] = i; + } + // Shuffle the row indices. + for (int i = rows - 1; i >= 0; i--) { + int r = random.nextInt(i + 1); + int temp = rowTransform[r]; + rowTransform[r] = rowTransform[i]; + rowTransform[i] = temp; + } + // Generate the inverse row indices. + for (int i = 0; i < rows; i++) { + rowInverse[rowTransform[i]] = i; + } + + colTransform = new int[cols]; + colInverse = new int[cols]; + for (int i = 0; i < cols; i++) { + colTransform[i] = i; + } + // Shuffle the column indices. + for (int i = cols - 1; i >= 0; i--) { + int r = random.nextInt(i + 1); + int temp = colTransform[r]; + colTransform[r] = colTransform[i]; + colTransform[i] = temp; + } + // Generate the inverse column indices. + for (int i = 0; i < cols; i++) { + colInverse[colTransform[i]] = i; + } + } + + /** + * Copy a given matrix into a new matrix, transforming each row index and + * each column index according to the randomization scheme that was created + * at construction time. 
+ * @param matrix the cost matrix to transform + * @return a new matrix with row and column indices transformed + */ + public float[][] transform(float[][] matrix) { + float[][] result = new float[rows][cols]; + for (int i = 0; i < rows; i++) { + for (int j = 0; j < cols; j++) { + result[rowTransform[i]][colTransform[j]] = matrix[i][j]; + } + } + return result; + } + + /** + * Copy a given matrix into a new matrix, transforming each row index and + * each column index according to the inverse of the randomization scheme + * that was created at construction time. + * @param matrix the cost matrix to be inverted + * @return a new matrix with row and column indices inverted + */ + public float[][] invert(float[][] matrix) { + float[][] result = new float[rows][cols]; + for (int i = 0; i < rows; i++) { + for (int j = 0; j < cols; j++) { + result[rowInverse[i]][colInverse[j]] = matrix[i][j]; + } + } + return result; + } + + /** + * Given an array where each element {@code indices[i]} represents the + * randomized column index corresponding to randomized row index {@code i}, + * create a new array with the corresponding inverted indices. + * @param indices an array of transformed indices to be inverted + * @return an array of inverted indices + */ + public int[] invertIndices(int[] indices) { + int[] result = new int[indices.length]; + for (int i = 0; i < indices.length; i++) { + result[rowInverse[i]] = colInverse[indices[i]]; + } + return result; + } + } + + /** + * Print the assignment plan to the system output stream + * @param plan + */ + public static void printAssignmentPlan(FavoredNodes plan) { + if (plan == null) return; + LOG.info("========== Start to print the assignment plan ================"); + // sort the map based on region info + Map> assignmentMap = + new TreeMap>(plan.getAssignmentMap()); + + for (Map.Entry> entry : assignmentMap.entrySet()) { + + String serverList = getFavoredNodesAsString(entry.getValue()); + String regionName = entry.getKey().getRegionNameAsString(); + LOG.info("Region: " + regionName ); + LOG.info("Its favored nodes: " + serverList); + } + LOG.info("========== Finish to print the assignment plan ================"); + } + + public static String getFavoredNodesAsString(List nodes) { + StringBuffer strBuf = new StringBuffer(); + int i = 0; + for (ServerName node : nodes) { + strBuf.append(node); + if (++i != nodes.size()) strBuf.append(","); + } + return strBuf.toString(); + } + + /** + * Update the assignment plan into .META. + * @param plan the assignments plan to be updated into .META. + * @throws IOException if cannot update assignment plan in .META. + */ + public void updateAssignmentPlanToMeta(FavoredNodes plan) + throws IOException { + try { + LOG.info("Start to update the META with the new assignment plan"); + List puts = new ArrayList(); + Map> assignmentMap = + plan.getAssignmentMap(); + for (Map.Entry> entry : assignmentMap.entrySet()) { + byte[] favoredNodes = FavoredNodeAssignmentHelper.getFavoredNodes(entry.getValue()); + Put put = new Put(entry.getKey().getRegionName()); + put.add(HConstants.CATALOG_FAMILY, FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER, + favoredNodes); + puts.add(put); + } + + // Write the region assignments to the meta table. 
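The code below writes the serialized favored nodes into the catalog family of .META.; for symmetry, a read-back sketch for a single region, assuming the row key (a region name) is already known and using the plain HTable/Get client API (class and method names here are made up):

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;

// Fetch the favored-nodes bytes written by updateAssignmentPlanToMeta for one region.
public class ReadFavoredNodesSketch {
  static byte[] readFavoredNodes(HTable metaTable, byte[] regionName) throws IOException {
    Result result = metaTable.get(new Get(regionName));
    return result.getValue(HConstants.CATALOG_FAMILY,
        FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER);
  }
}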
+ HTable metaTable = new HTable(conf, HConstants.META_TABLE_NAME); + metaTable.put(puts); + LOG.info("Updated the META with the new assignment plan"); + } catch (Exception e) { + LOG.error("Failed to update META with the new assignment" + + "plan because " + e.getMessage()); + } + } + + /** + * Update the assignment plan to all the region servers + * @param plan + * @throws IOException + */ + private void updateAssignmentPlanToRegionServers(FavoredNodes plan) + throws IOException{ + LOG.info("Start to update the region servers with the new assignment plan"); + // Get the region to region server map + Map> currentAssignment = + this.getRegionAssignmentSnapshot().getRegionServerToRegionMap(); + HConnection connection = this.getHBaseAdmin().getConnection(); + + // track of the failed and succeeded updates + int succeededNum = 0; + Map failedUpdateMap = + new HashMap(); + + for (Map.Entry> entry : + currentAssignment.entrySet()) { + List>> regionUpdateInfos = + new ArrayList>>(); + try { + // Keep track of the favored updates for the current region server + FavoredNodes singleServerPlan = null; + // Find out all the updates for the current region server + for (HRegionInfo region : entry.getValue()) { + List favoredServerList = plan.getFavoredNodes(region); + if (favoredServerList != null && + favoredServerList.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + // Create the single server plan if necessary + if (singleServerPlan == null) { + singleServerPlan = new FavoredNodes(); + } + // Update the single server update + singleServerPlan.updateAssignmentPlan(region, favoredServerList); + } + regionUpdateInfos.add( + new Triple>(region, null, favoredServerList)); + } + if (singleServerPlan != null) { + // Update the current region server with its updated favored nodes + BlockingInterface currentRegionServer = connection.getAdmin(entry.getKey()); + OpenRegionRequest request = + RequestConverter.buildOpenRegionRequest(regionUpdateInfos); + + UpdateFavoredNodesResponse updateFavoredNodesResponse = + currentRegionServer.updateFavoredNodes(null, request); + LOG.info("Region server " + + ProtobufUtil.getServerInfo(currentRegionServer).getServerName() + + " has updated " + updateFavoredNodesResponse.getResponse() + " / " + + singleServerPlan.getAssignmentMap().size() + + " regions with the assignment plan"); + succeededNum ++; + } + } catch (Exception e) { + failedUpdateMap.put(entry.getKey(), e); + } + } + // log the succeeded updates + LOG.info("Updated " + succeededNum + " region servers with " + + "the new assignment plan"); + + // log the failed updates + int failedNum = failedUpdateMap.size(); + if (failedNum != 0) { + LOG.error("Failed to update the following + " + failedNum + + " region servers with its corresponding favored nodes"); + for (Map.Entry entry : + failedUpdateMap.entrySet() ) { + LOG.error("Failed to update " + entry.getKey().getHostAndPort() + + " because of " + entry.getValue().getMessage()); + } + } + } + + public void updateAssignmentPlan(FavoredNodes plan) + throws IOException { + LOG.info("Start to update the new assignment plan for the META table and" + + " the region servers"); + // Update the new assignment plan to META + updateAssignmentPlanToMeta(plan); + // Update the new assignment plan to Region Servers + updateAssignmentPlanToRegionServers(plan); + LOG.info("Finish to update the new assignment plan for the META table and" + + " the region servers"); + } + + /** + * Return how many regions will move per table since their primary RS will + * change + * + * 
@param newPlanMap - new AssignmentPlan + * @return how many primaries will move per table + */ + public Map getRegionsMovement(FavoredNodes newPlan) + throws IOException { + Map movesPerTable = new HashMap(); + SnapshotOfRegionAssignment snapshot = this.getRegionAssignmentSnapshot(); + Map> tableToRegions = snapshot + .getTableToRegionMap(); + FavoredNodes oldPlan = snapshot.getExistingAssignmentPlan(); + Set tables = snapshot.getTableSet(); + for (String table : tables) { + int movedPrimaries = 0; + if (!this.targetTableSet.isEmpty() + && !this.targetTableSet.contains(table)) { + continue; + } + List regions = tableToRegions.get(table); + for (HRegionInfo region : regions) { + List oldServers = oldPlan.getFavoredNodes(region); + List newServers = newPlan.getFavoredNodes(region); + if (oldServers != null && newServers != null) { + ServerName oldPrimary = oldServers.get(0); + ServerName newPrimary = newServers.get(0); + if (oldPrimary.compareTo(newPrimary) != 0) { + movedPrimaries++; + } + } + } + movesPerTable.put(table, movedPrimaries); + } + return movesPerTable; + } + + /** + * Compares two plans and check whether the locality dropped or increased + * (prints the information as a string) also prints the baseline locality + * + * @param movesPerTable - how many primary regions will move per table + * @param regionLocalityMap - locality map from FS + * @param newPlan - new assignment plan + * @param do we want to run verification report + * @throws IOException + */ + public void checkDifferencesWithOldPlan(Map movesPerTable, + Map> regionLocalityMap, FavoredNodes newPlan) + throws IOException { + // localities for primary, secondary and tertiary + SnapshotOfRegionAssignment snapshot = this.getRegionAssignmentSnapshot(); + FavoredNodes oldPlan = snapshot.getExistingAssignmentPlan(); + Set tables = snapshot.getTableSet(); + Map> tableToRegionsMap = snapshot.getTableToRegionMap(); + for (String table : tables) { + float[] deltaLocality = new float[3]; + float[] locality = new float[3]; + if (!this.targetTableSet.isEmpty() + && !this.targetTableSet.contains(table)) { + continue; + } + List regions = tableToRegionsMap.get(table); + System.out.println("=================================================="); + System.out.println("Assignment Plan Projection Report For Table: " + table); + System.out.println("\t Total regions: " + regions.size()); + System.out.println("\t" + movesPerTable.get(table) + + " primaries will move due to their primary has changed"); + for (HRegionInfo currentRegion : regions) { + Map regionLocality = regionLocalityMap.get(currentRegion + .getEncodedName()); + if (regionLocality == null) { + continue; + } + List oldServers = oldPlan.getFavoredNodes(currentRegion); + List newServers = newPlan.getFavoredNodes(currentRegion); + if (newServers != null && oldServers != null) { + int i=0; + for (FavoredNodes.Position p : FavoredNodes.Position.values()) { + ServerName newServer = newServers.get(p.ordinal()); + ServerName oldServer = oldServers.get(p.ordinal()); + Float oldLocality = 0f; + if (oldServers != null) { + oldLocality = regionLocality.get(oldServer.getHostname()); + if (oldLocality == null) { + oldLocality = 0f; + } + locality[i] += oldLocality; + } + Float newLocality = regionLocality.get(newServer.getHostname()); + if (newLocality == null) { + newLocality = 0f; + } + deltaLocality[i] += newLocality - oldLocality; + i++; + } + } + } + DecimalFormat df = new java.text.DecimalFormat( "#.##"); + for (int i = 0; i < deltaLocality.length; i++) { + System.out.print("\t\t 
Baseline locality for "); + if (i == 0) { + System.out.print("primary "); + } else if (i == 1) { + System.out.print("secondary "); + } else if (i == 2) { + System.out.print("tertiary "); + } + System.out.println(df.format(100 * locality[i] / regions.size()) + "%"); + System.out.print("\t\t Locality will change with the new plan: "); + System.out.println(df.format(100 * deltaLocality[i] / regions.size()) + + "%"); + } + System.out.println("\t Baseline dispersion"); + printDispersionScores(table, snapshot, regions.size(), null, true); + System.out.println("\t Projected dispersion"); + printDispersionScores(table, snapshot, regions.size(), newPlan, true); + } + } + + public void printDispersionScores(String table, + SnapshotOfRegionAssignment snapshot, int numRegions, FavoredNodes newPlan, + boolean simplePrint) { + if (!this.targetTableSet.isEmpty() && !this.targetTableSet.contains(table)) { + return; + } + AssignmentVerificationReport report = new AssignmentVerificationReport(); + report.fillUpDispersion(table, snapshot, newPlan); + List dispersion = report.getDispersionInformation(); + if (simplePrint) { + DecimalFormat df = new java.text.DecimalFormat("#.##"); + System.out.println("\tAvg dispersion score: " + + df.format(dispersion.get(0)) + " hosts;\tMax dispersion score: " + + df.format(dispersion.get(1)) + " hosts;\tMin dispersion score: " + + df.format(dispersion.get(2)) + " hosts;"); + } else { + LOG.info("For Table: " + table + " ; #Total Regions: " + numRegions + + " ; The average dispersion score is " + dispersion.get(0)); + } + } + + public void printLocalityAndDispersionForCurrentPlan( + Map> regionLocalityMap) throws IOException { + SnapshotOfRegionAssignment snapshot = this.getRegionAssignmentSnapshot(); + FavoredNodes assignmentPlan = snapshot.getExistingAssignmentPlan(); + Set tables = snapshot.getTableSet(); + Map> tableToRegionsMap = snapshot + .getTableToRegionMap(); + for (String table : tables) { + float[] locality = new float[3]; + if (!this.targetTableSet.isEmpty() + && !this.targetTableSet.contains(table)) { + continue; + } + List regions = tableToRegionsMap.get(table); + for (HRegionInfo currentRegion : regions) { + Map regionLocality = regionLocalityMap.get(currentRegion + .getEncodedName()); + if (regionLocality == null) { + continue; + } + List servers = assignmentPlan.getFavoredNodes(currentRegion); + if (servers != null) { + int i = 0; + for (FavoredNodes.Position p : FavoredNodes.Position.values()) { + ServerName server = servers.get(p.ordinal()); + Float currentLocality = 0f; + if (servers != null) { + currentLocality = regionLocality.get(server.getHostname()); + if (currentLocality == null) { + currentLocality = 0f; + } + locality[i] += currentLocality; + } + i++; + } + } + } + for (int i = 0; i < locality.length; i++) { + String copy = null; + if (i == 0) { + copy = "primary"; + } else if (i == 1) { + copy = "secondary"; + } else if (i == 2) { + copy = "tertiary" ; + } + float avgLocality = 100 * locality[i] / regions.size(); + LOG.info("For Table: " + table + " ; #Total Regions: " + regions.size() + + " ; The average locality for " + copy+ " is " + avgLocality + " %"); + } + printDispersionScores(table, snapshot, regions.size(), null, false); + } + } + + /** + * @param favoredNodes The Stromg of favored nodes + * @return the list of ServerName for the byte array of favored nodes. 
+ */ + public static List getFavoredNodeList(String favoredNodesStr) { + String[] favoredNodesArray = StringUtils.split(favoredNodesStr, ","); + if (favoredNodesArray == null) + return null; + + List serverList = new ArrayList(); + for (String hostNameAndPort : favoredNodesArray) { + serverList.add(new ServerName(hostNameAndPort)); + } + return serverList; + } + + public static void main(String args[]) throws IOException, InterruptedException { + Options opt = new Options(); + opt.addOption("w", "write", false, "write the assignments to META only"); + opt.addOption("u", "update", false, + "update the assignments to META and RegionServers together"); + opt.addOption("n", "dry-run", false, "do not write assignments to META"); + opt.addOption("v", "verify", false, "verify current assignments against META"); + opt.addOption("p", "print", false, "print the current assignment plan in META"); + opt.addOption("h", "help", false, "print usage"); + opt.addOption("d", "verification-details", false, + "print the details of verification report"); + + opt.addOption("zk", true, "to set the zookeeper quorum"); + opt.addOption("fs", true, "to set HDFS"); + opt.addOption("hbase_root", true, "to set hbase_root directory"); + + opt.addOption("overwrite", false, + "overwrite the favored nodes for a single region," + + "for example: -update -r regionName -f server1:port,server2:port,server3:port"); + opt.addOption("r", true, "The region name that needs to be updated"); + opt.addOption("f", true, "The new favored nodes"); + + opt.addOption("tables", true, + "The list of table names splitted by ',' ;" + + "For example: -tables: t1,t2,...,tn"); + opt.addOption("l", "locality", true, "enforce the maxium locality"); + opt.addOption("m", "min-move", true, "enforce minium assignment move"); + opt.addOption("diff", false, "calculate difference between assignment plans"); + opt.addOption("munkres", false, + "use munkres to place secondaries and tertiaries"); + opt.addOption("ld", "locality-dispersion", false, "print locality and dispersion information for current plan"); + opt.addOption("exprack", "expand-with-rack", false, "expand the regions to a new rack"); + try { + // Set the log4j + Logger.getLogger("org.apache.zookeeper").setLevel(Level.ERROR); + Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.ERROR); + Logger.getLogger("org.apache.hadoop.hbase.master.RegionPlacement") + .setLevel(Level.INFO); + + CommandLine cmd = new GnuParser().parse(opt, args); + Configuration conf = HBaseConfiguration.create(); + + boolean enforceMinAssignmentMove = true; + boolean enforceLocality = true; + boolean verificationDetails = false; + + // Read all the options + if ((cmd.hasOption("l") && + cmd.getOptionValue("l").equalsIgnoreCase("false")) || + (cmd.hasOption("locality") && + cmd.getOptionValue("locality").equalsIgnoreCase("false"))) { + enforceLocality = false; + } + + if ((cmd.hasOption("m") && + cmd.getOptionValue("m").equalsIgnoreCase("false")) || + (cmd.hasOption("min-move") && + cmd.getOptionValue("min-move").equalsIgnoreCase("false"))) { + enforceMinAssignmentMove = false; + } + + if (cmd.hasOption("zk")) { + conf.set(HConstants.ZOOKEEPER_QUORUM, cmd.getOptionValue("zk")); + LOG.info("Setting the zk quorum: " + conf.get(HConstants.ZOOKEEPER_QUORUM)); + } + + if (cmd.hasOption("fs")) { + conf.set(FileSystem.FS_DEFAULT_NAME_KEY, cmd.getOptionValue("fs")); + LOG.info("Setting the HDFS: " + conf.get(FileSystem.FS_DEFAULT_NAME_KEY)); + } + + if (cmd.hasOption("hbase_root")) { + conf.set(HConstants.HBASE_DIR, 
cmd.getOptionValue("hbase_root")); + LOG.info("Setting the hbase root directory: " + conf.get(HConstants.HBASE_DIR)); + } + + // Create the region placement obj + RegionPlacementMaintainer rp = new RegionPlacementMaintainer(conf, enforceLocality, + enforceMinAssignmentMove); + + if (cmd.hasOption("d") || cmd.hasOption("verification-details")) { + verificationDetails = true; + } + + if (cmd.hasOption("tables")) { + String tableNameListStr = cmd.getOptionValue("tables"); + String[] tableNames = StringUtils.split(tableNameListStr, ","); + rp.setTargetTableName(tableNames); + } + + if (cmd.hasOption("munkres")) { + USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = true; + } + + // Read all the modes + if (cmd.hasOption("v") || cmd.hasOption("verify")) { + // Verify the region placement. + rp.verifyRegionPlacement(verificationDetails); + } else if (cmd.hasOption("n") || cmd.hasOption("dry-run")) { + // Generate the assignment plan only without updating the META and RS + FavoredNodes plan = rp.getNewAssignmentPlan(); + printAssignmentPlan(plan); + } else if (cmd.hasOption("w") || cmd.hasOption("write")) { + // Generate the new assignment plan + FavoredNodes plan = rp.getNewAssignmentPlan(); + // Print the new assignment plan + printAssignmentPlan(plan); + // Write the new assignment plan to META + rp.updateAssignmentPlanToMeta(plan); + } else if (cmd.hasOption("u") || cmd.hasOption("update")) { + // Generate the new assignment plan + FavoredNodes plan = rp.getNewAssignmentPlan(); + // Print the new assignment plan + printAssignmentPlan(plan); + // Update the assignment to META and Region Servers + rp.updateAssignmentPlan(plan); + } else if (cmd.hasOption("diff")) { + FavoredNodes newPlan = rp.getNewAssignmentPlan(); + Map> locality = FSUtils + .getRegionDegreeLocalityMappingFromFS(conf); + Map movesPerTable = rp.getRegionsMovement(newPlan); + rp.checkDifferencesWithOldPlan(movesPerTable, locality, newPlan); + System.out.println("Do you want to update the assignment plan? 
[y/n]"); + Scanner s = new Scanner(System.in); + String input = s.nextLine().trim(); + if (input.equals("y")) { + System.out.println("Updating assignment plan..."); + rp.updateAssignmentPlan(newPlan); + } + s.close(); + } else if (cmd.hasOption("ld")) { + Map> locality = FSUtils + .getRegionDegreeLocalityMappingFromFS(conf); + rp.printLocalityAndDispersionForCurrentPlan(locality); + } else if (cmd.hasOption("p") || cmd.hasOption("print")) { + FavoredNodes plan = rp.getRegionAssignmentSnapshot().getExistingAssignmentPlan(); + printAssignmentPlan(plan); + } else if (cmd.hasOption("overwrite")) { + if (!cmd.hasOption("f") || !cmd.hasOption("r")) { + throw new IllegalArgumentException("Please specify: " + + " -update -r regionName -f server1:port,server2:port,server3:port"); + } + + String regionName = cmd.getOptionValue("r"); + String favoredNodesStr = cmd.getOptionValue("f"); + LOG.info("Going to update the region " + regionName + " with the new favored nodes " + + favoredNodesStr); + List favoredNodes = null; + HRegionInfo regionInfo = + rp.getRegionAssignmentSnapshot().getRegionNameToRegionInfoMap().get(regionName); + if (regionInfo == null) { + LOG.error("Cannot find the region " + regionName + " from the META"); + } else { + try { + favoredNodes = getFavoredNodeList(favoredNodesStr); + } catch (IllegalArgumentException e) { + LOG.error("Cannot parse the invalid favored nodes because " + e); + } + FavoredNodes newPlan = new FavoredNodes(); + newPlan.updateAssignmentPlan(regionInfo, favoredNodes); + rp.updateAssignmentPlan(newPlan); + } + } else { + printHelp(opt); + } + } catch (ParseException e) { + printHelp(opt); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java index a452402..91d8a01 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java @@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master.balancer; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -29,6 +28,7 @@ import java.util.Map; import java.util.Random; import java.util.Set; import java.util.TreeMap; +import java.util.Map.Entry; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -192,7 +192,7 @@ public class FavoredNodeAssignmentHelper { * @param serverList * @return PB'ed bytes of {@link FavoredNodes} generated by the server list. 
*/ - static byte[] getFavoredNodes(List serverAddrList) { + public static byte[] getFavoredNodes(List serverAddrList) { FavoredNodes.Builder f = FavoredNodes.newBuilder(); for (ServerName s : serverAddrList) { HBaseProtos.ServerName.Builder b = HBaseProtos.ServerName.newBuilder(); @@ -302,6 +302,182 @@ public class FavoredNodeAssignmentHelper { return secondaryAndTertiaryMap; } + public Map> mapRSToPrimaries( + Map primaryRSMap) { + Map> primaryServerMap = + new HashMap>(); + for (Entry e : primaryRSMap.entrySet()) { + Set currentSet = primaryServerMap.get(e.getValue()); + if (currentSet == null) { + currentSet = new HashSet(); + } + currentSet.add(e.getKey()); + primaryServerMap.put(e.getValue(), currentSet); + } + return primaryServerMap; + } + + // For regions that share the primary, avoid placing the secondary and tertiary on the same RS + public Map> placeSecondaryAndTertiaryWithRestrictions( + Map primaryRSMap) + throws IOException { + Map> serverToPrimaries = + mapRSToPrimaries(primaryRSMap); + Map> secondaryAndTertiaryMap = + new HashMap>(); + + for (Entry entry : primaryRSMap.entrySet()) { + // Get the target region and its primary region server rack + HRegionInfo regionInfo = entry.getKey(); + ServerName primaryRS = entry.getValue(); + + try { + // Create the secondary and tertiary region server pair object. + Pair pair; + // Get the rack for the primary region server + String primaryRack = rackManager.getRack(primaryRS); + + if (getTotalNumberOfRacks() == 1) { + // Single rack case: have to pick the secondary and tertiary + // from the same rack + List serverList = getServersFromRack(primaryRack); + if (serverList.size() <= 2) { + // Single region server case: cannot place the favored nodes + // on any server; !domain.canPlaceFavoredNodes() + continue; + } else { + // Randomly select two region servers from the server list and make + // sure they do not overlap with the primary region server + Set serverSkipSet = new HashSet(); + serverSkipSet.add(primaryRS); + + // Place the secondary RS + ServerName secondaryRS = getOneRandomServer(primaryRack, + serverSkipSet); + // Skip the secondary for the tertiary placement + serverSkipSet.add(secondaryRS); + + // Place the tertiary RS + ServerName tertiaryRS = getOneRandomServer(primaryRack, + serverSkipSet); + + if (secondaryRS == null || tertiaryRS == null) { + LOG.error("Cannot place the secondary and tertiary" + + " region server for region " + + regionInfo.getRegionNameAsString()); + } + // Create the secondary and tertiary pair + pair = new Pair(); + pair.setFirst(secondaryRS); + pair.setSecond(tertiaryRS); + } + } else { + // Multi-rack case: randomly choose the secondary and tertiary + // region servers from another rack to place the secondary and tertiary; + // randomly choose one rack other than the primary rack + Set rackSkipSet = new HashSet(); + rackSkipSet.add(primaryRack); + String secondaryRack = getOneRandomRack(rackSkipSet); + List serverList = getServersFromRack(secondaryRack); + Set serverSet = new HashSet(); + serverSet.addAll(serverList); + + if (serverList.size() >= 2) { + + // Randomly pick up two servers from this secondary rack + // Skip the secondary for the tertiary placement + // skip the servers which share the primary already + Set primaries = serverToPrimaries.get(primaryRS); + Set skipServerSet = new HashSet(); + while (true) { + Pair secondaryAndTertiary = null; + if (primaries.size() > 1) { + // check where its secondary and tertiary are + for (HRegionInfo primary : primaries) { + secondaryAndTertiary =
secondaryAndTertiaryMap.get(primary); + if (secondaryAndTertiary != null) { + if (regionServerToRackMap.get(secondaryAndTertiary.getFirst()) + .equals(secondaryRack)) { + skipServerSet.add(secondaryAndTertiary.getFirst()); + } + if (regionServerToRackMap.get(secondaryAndTertiary.getSecond()) + .equals(secondaryRack)) { + skipServerSet.add(secondaryAndTertiary.getSecond()); + } + } + } + } + if (skipServerSet.size() + 2 <= serverSet.size()) + break; + skipServerSet.clear(); + rackSkipSet.add(secondaryRack); + // we used all racks + if (rackSkipSet.size() == getTotalNumberOfRacks()) { + // remove the last two added and break + skipServerSet.remove(secondaryAndTertiary.getFirst()); + skipServerSet.remove(secondaryAndTertiary.getSecond()); + break; + } + secondaryRack = getOneRandomRack(rackSkipSet); + serverList = getServersFromRack(secondaryRack); + serverSet = new HashSet(); + serverSet.addAll(serverList); + } + + // Place the secondary RS + ServerName secondaryRS = getOneRandomServer(secondaryRack, skipServerSet); + skipServerSet.add(secondaryRS); + // Place the tertiary RS + ServerName tertiaryRS = getOneRandomServer(secondaryRack, skipServerSet); + + if (secondaryRS == null || tertiaryRS == null) { + LOG.error("Cannot place the secondary and tertiary" + + " region server for region " + + regionInfo.getRegionNameAsString()); + } + // Create the secondary and tertiary pair + pair = new Pair(); + pair.setFirst(secondaryRS); + pair.setSecond(tertiaryRS); + } else { + // Pick the secondary rs from this secondary rack + // and pick the tertiary from another random rack + pair = new Pair(); + ServerName secondary = getOneRandomServer(secondaryRack); + pair.setFirst(secondary); + + // Pick the tertiary + if (getTotalNumberOfRacks() == 2) { + // Pick the tertiary from the same rack of the primary RS + Set serverSkipSet = new HashSet(); + serverSkipSet.add(primaryRS); + ServerName tertiary = getOneRandomServer(primaryRack, + serverSkipSet); + pair.setSecond(tertiary); + } else { + // Pick the tertiary from another rack + rackSkipSet.add(secondaryRack); + String tertiaryRandomRack = getOneRandomRack(rackSkipSet); + ServerName tertinary = getOneRandomServer(tertiaryRandomRack); + pair.setSecond(tertinary); + } + } + } + if (pair != null) { + secondaryAndTertiaryMap.put(regionInfo, pair); + LOG.debug("Place the secondary and tertiary region server for region " + + regionInfo.getRegionNameAsString()); + } + } catch (Exception e) { + LOG.warn("Cannot place the favored nodes for region " + + regionInfo.getRegionNameAsString() + " because " + e); + continue; + } + } + return secondaryAndTertiaryMap; + } + private ServerName[] singleRackCase(HRegionInfo regionInfo, ServerName primaryRS, String primaryRack) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodes.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodes.java index 7acb863..7c2b58e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodes.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodes.java @@ -27,6 +27,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.RegionPlacementMaintainer; import org.jboss.netty.util.internal.ConcurrentHashMap; /** @@ -81,7 +82,7 @@ public class FavoredNodes { * @param 
server * @return position */ - static Position getFavoredServerPosition( + public static Position getFavoredServerPosition( List favoredNodes, ServerName server) { if (favoredNodes == null || server == null || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { @@ -94,4 +95,26 @@ public class FavoredNodes { } return null; } + + /** + * @return the mapping between each region to its favored region server list + */ + public synchronized Map> getAssignmentMap() { + return this.favoredNodesMap; + } + + /** + * Add an assignment to the plan + * @param region + * @param servers + */ + public synchronized void updateAssignmentPlan(HRegionInfo region, + List servers) { + if (region == null || servers == null || servers.size() ==0) + return; + this.favoredNodesMap.put(region, servers); + LOG.info("Update the assignment plan for region " + + region.getRegionNameAsString() + " ; favored nodes " + + RegionPlacementMaintainer.getFavoredNodesAsString(servers)); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 3ddbfe8..5f36bcc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -145,6 +145,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ActionResult; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; @@ -4268,4 +4269,17 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa } return result; } + + @Override + public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller, + OpenRegionRequest request) throws ServiceException { + List openInfoList = request.getOpenInfoList(); + UpdateFavoredNodesResponse.Builder respBuilder = UpdateFavoredNodesResponse.newBuilder(); + for (RegionOpenInfo regionOpenInfo : openInfoList) { + HRegionInfo hri = HRegionInfo.convert(regionOpenInfo.getRegion()); + updateRegionFavoredNodesMapping(hri.getEncodedName(), regionOpenInfo.getFavoredNodesList()); + } + respBuilder.setResponse(openInfoList.size()); + return respBuilder.build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java new file mode 100644 index 0000000..fbc03bd --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java @@ -0,0 +1,165 @@ +/** + * Copyright 2010 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +@InterfaceAudience.Private +class FSRegionScanner implements Runnable { + static private final Log LOG = LogFactory.getLog(FSRegionScanner.class); + + private Path regionPath; + + /** + * The file system used + */ + private FileSystem fs; + + /** + * Maps each region to the RS with highest locality for that region. + */ + private Map regionToBestLocalityRSMapping; + + /** + * Maps region encoded names to maps of hostnames to fractional locality of + * that region on that host. + */ + private Map> regionDegreeLocalityMapping; + + FSRegionScanner(FileSystem fs, Path regionPath, + Map regionToBestLocalityRSMapping, + Map> regionDegreeLocalityMapping) { + this.fs = fs; + this.regionPath = regionPath; + this.regionToBestLocalityRSMapping = regionToBestLocalityRSMapping; + this.regionDegreeLocalityMapping = regionDegreeLocalityMapping; + } + + @Override + public void run() { + try { + // empty the map for each region + Map blockCountMap = new HashMap(); + + //get table name + String tableName = regionPath.getParent().getName(); + int totalBlkCount = 0; + + // ignore null + FileStatus[] cfList = fs.listStatus(regionPath); + if (null == cfList) { + return; + } + + // for each cf, get all the blocks information + for (FileStatus cfStatus : cfList) { + if (!cfStatus.isDir()) { + // skip because this is not a CF directory + continue; + } + if (cfStatus.getPath().getName().startsWith(".")) { + continue; + } + FileStatus[] storeFileLists = fs.listStatus(cfStatus.getPath()); + if (null == storeFileLists) { + continue; + } + + for (FileStatus storeFile : storeFileLists) { + BlockLocation[] blkLocations = + fs.getFileBlockLocations(storeFile, 0, storeFile.getLen()); + if (null == blkLocations) { + continue; + } + + totalBlkCount += blkLocations.length; + for(BlockLocation blk: blkLocations) { + for (String host: blk.getHosts()) { + AtomicInteger count = blockCountMap.get(host); + if (count == null) { + count = new AtomicInteger(0); + blockCountMap.put(host, count); + } + count.incrementAndGet(); + } + } + } + } + + if (regionToBestLocalityRSMapping != null) { + int largestBlkCount = 0; + String hostToRun = null; + for (Map.Entry entry : blockCountMap.entrySet()) { + String host = entry.getKey(); + + int tmp = entry.getValue().get(); + if (tmp > largestBlkCount) { + largestBlkCount = tmp; + hostToRun = host; + } + } + + // empty regions could make this null + if (null == hostToRun) { + return; + } + + if (hostToRun.endsWith(".")) { + hostToRun = hostToRun.substring(0, hostToRun.length()-1); + } + String name = tableName + ":" + regionPath.getName(); + synchronized (regionToBestLocalityRSMapping) { + 
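// This map is shared by every FSRegionScanner task submitted to the FSUtils thread pool and is not required to be a thread-safe implementation, so writes are serialized on the map itself. +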
regionToBestLocalityRSMapping.put(name, hostToRun); + } + } + + if (regionDegreeLocalityMapping != null && totalBlkCount > 0) { + Map hostLocalityMap = new HashMap(); + for (Map.Entry entry : blockCountMap.entrySet()) { + String host = entry.getKey(); + if (host.endsWith(".")) { + host = host.substring(0, host.length() - 1); + } + // Locality is fraction of blocks local to this host. + float locality = ((float)entry.getValue().get()) / totalBlkCount; + hostLocalityMap.put(host, locality); + } + // Put the locality map into the result map, keyed by the encoded name + // of the region. + regionDegreeLocalityMapping.put(regionPath.getName(), hostLocalityMap); + } + } catch (IOException e) { + LOG.warn("Problem scanning file system", e); + } catch (RuntimeException e) { + LOG.warn("Problem scanning file system", e); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 29b6b67..a64a753 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -34,6 +34,10 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; import org.apache.commons.logging.Log; @@ -1653,4 +1657,202 @@ public abstract class FSUtils { fs.setTimes(src, EnvironmentEdgeManager.currentTimeMillis(), -1); return fs.rename(src, dest); } + + /** + * This function is to scan the root path of the file system to get the + * degree of locality for each region on each of the servers having at least + * one block of that region. + * + * @param conf + * the configuration to use + * @return the mapping from region encoded name to a map of server names to + * locality fraction + * @throws IOException + * in case of file system errors or interrupts + */ + public static Map> getRegionDegreeLocalityMappingFromFS( + final Configuration conf) throws IOException { + return getRegionDegreeLocalityMappingFromFS( + conf, null, + conf.getInt("hbase.client.localityCheck.threadPoolSize", 2)); + + } + + /** + * This function is to scan the root path of the file system to get the + * degree of locality for each region on each of the servers having at least + * one block of that region. + * + * @param conf + * the configuration to use + * @param tableName + * the table to be scanned + * @return the mapping from region encoded name to a map of server names to + * locality fraction + * @throws IOException + * in case of file system errors or interrupts + */ + public static Map> getRegionDegreeLocalityMappingFromFS( + final Configuration conf, String tableName) throws IOException { + return getRegionDegreeLocalityMappingFromFS( + conf, tableName, + conf.getInt("hbase.client.localityCheck.threadPoolSize", 2)); + + } + + /** + * This function is to scan the root path of the file system to get the + * degree of locality for each region on each of the servers having at least + * one block of that region. 
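+ * Illustrative use (the table name here is hypothetical): {@code getRegionDegreeLocalityMappingFromFS(conf, "usertable", 8)} returns, for each region's encoded name, the fraction of that region's HFile blocks stored on each host.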
+ * + * @param conf + * the configuration to use + * @param desiredTable + * the table you wish to scan locality for + * @param threadPoolSize + * the thread pool size to use + * @return the mapping from region encoded name to a map of server names to + * locality fraction + * @throws IOException + * in case of file system errors or interrupts + */ + public static Map> getRegionDegreeLocalityMappingFromFS( + final Configuration conf, final String desiredTable, int threadPoolSize) + throws IOException { + Map> regionDegreeLocalityMapping = + new ConcurrentHashMap>(); + getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null, + regionDegreeLocalityMapping); + return regionDegreeLocalityMapping; + } + + /** + * This function is to scan the root path of the file system to get either the + * mapping between the region name and its best locality region server or the + * degree of locality of each region on each of the servers having at least + * one block of that region. The output map parameters are both optional. + * + * @param threadPoolSize + * the thread pool size to use + * @param conf + * the configuration to use + * @param desiredTable + * the table you wish to scan locality for + * @param regionToBestLocalityRSMapping + * the map into which to put the best locality mapping or null + * @param regionDegreeLocalityMapping + * the map into which to put the locality degree mapping or null, + * must be a thread-safe implementation + * @throws IOException + * in case of file system errors or interrupts + */ + private static void getRegionLocalityMappingFromFS( + final Configuration conf, final String desiredTable, + int threadPoolSize, + Map regionToBestLocalityRSMapping, + Map> regionDegreeLocalityMapping) + throws IOException { + FileSystem fs = FileSystem.get(conf); + Path rootPath = FSUtils.getRootDir(conf); + long startTime = System.currentTimeMillis(); + Path queryPath; + if (null == desiredTable) { + queryPath = new Path(rootPath.toString() + "/*/*/"); + } else { + queryPath = new Path(rootPath.toString() + "/" + desiredTable + "/*/"); + } + + // reject all paths that are not appropriate + PathFilter pathFilter = new PathFilter() { + @Override + public boolean accept(Path path) { + // this is the region name; it may get some noise data + if (null == path) { + return false; + } + + // no parent? + Path parent = path.getParent(); + if (null == parent) { + return false; + } + + // not part of a table? 
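+ // The glob above matches hbase.rootdir/TABLE/REGION, so the parent directory is the table directory; names starting with '.' (other than .META.) are HBase system directories rather than user tables.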
+ if (parent.getName().startsWith(".") + && !parent.getName().equals(".META.")) { + return false; + } + + String regionName = path.getName(); + if (null == regionName) { + return false; + } + + if (!regionName.toLowerCase().matches("[0-9a-f]+")) { + return false; + } + return true; + } + }; + + FileStatus[] statusList = fs.globStatus(queryPath, pathFilter); + + if (null == statusList) { + return; + } else { + LOG.debug("Query Path: " + queryPath + " ; # list of files: " + + statusList.length); + } + + // lower the number of threads in case we have very few expected regions + threadPoolSize = Math.min(threadPoolSize, statusList.length); + + // run in multiple threads + ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadPoolSize, + threadPoolSize, 60, TimeUnit.SECONDS, + new ArrayBlockingQueue(statusList.length)); + try { + // ignore all file status items that are not of interest + for (FileStatus regionStatus : statusList) { + if (null == regionStatus) { + continue; + } + + if (!regionStatus.isDir()) { + continue; + } + + Path regionPath = regionStatus.getPath(); + if (null == regionPath) { + continue; + } + + tpe.execute(new FSRegionScanner(fs, regionPath, + regionToBestLocalityRSMapping, regionDegreeLocalityMapping)); + } + } finally { + tpe.shutdown(); + int threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, + 60 * 1000); + try { + // here we wait until TPE terminates, which is either naturally or by + // exceptions in the execution of the threads + while (!tpe.awaitTermination(threadWakeFrequency, + TimeUnit.MILLISECONDS)) { + // printing out rough estimate, so as to not introduce + // AtomicInteger + LOG.info("Locality checking is underway: { Scanned Regions : " + + tpe.getCompletedTaskCount() + "/" + + tpe.getTaskCount() + " }"); + } + } catch (InterruptedException e) { + throw new IOException(e); + } + } + + long overhead = System.currentTimeMillis() - startTime; + String overheadMsg = "Scan DFS for locality info takes " + overhead + " ms"; + + LOG.info(overheadMsg); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java new file mode 100644 index 0000000..9308e01 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java @@ -0,0 +1,517 @@ +/* + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import java.util.Arrays; +import java.util.Deque; +import java.util.LinkedList; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Computes the optimal (minimal cost) assignment of jobs to workers (or other + * analogous) concepts given a cost matrix of each pair of job and worker, using + * the algorithm by James Munkres in "Algorithms for the Assignment and + * Transportation Problems", with additional optimizations as described by Jin + * Kue Wong in "A New Implementation of an Algorithm for the Optimal Assignment + * Problem: An Improved Version of Munkres' Algorithm". The algorithm runs in + * O(n^3) time and need O(n^2) auxiliary space where n is the number of jobs or + * workers, whichever is greater. + */ +@InterfaceAudience.Private +public class MunkresAssignment { + + // The original algorithm by Munkres uses the terms STAR and PRIME to denote + // different states of zero values in the cost matrix. These values are + // represented as byte constants instead of enums to save space in the mask + // matrix by a factor of 4n^2 where n is the size of the problem. + private static final byte NONE = 0; + private static final byte STAR = 1; + private static final byte PRIME = 2; + + // The algorithm requires that the number of column is at least as great as + // the number of rows. If that is not the case, then the cost matrix should + // be transposed before computation, and the solution matrix transposed before + // returning to the caller. + private final boolean transposed; + + // The number of rows of internal matrices. + private final int rows; + + // The number of columns of internal matrices. + private final int cols; + + // The cost matrix, the cost of assigning each row index to column index. + private float[][] cost; + + // Mask of zero cost assignment states. + private byte[][] mask; + + // Covering some rows of the cost matrix. + private boolean[] rowsCovered; + + // Covering some columns of the cost matrix. + private boolean[] colsCovered; + + // The alternating path between starred zeroes and primed zeroes + private Deque> path; + + // The solution, marking which rows should be assigned to which columns. The + // positions of elements in this array correspond to the rows of the cost + // matrix, and the value of each element correspond to the columns of the cost + // matrix, i.e. assignments[i] = j indicates that row i should be assigned to + // column j. + private int[] assignments; + + // Improvements described by Jin Kue Wong cache the least value in each row, + // as well as the column index of the least value in each row, and the pending + // adjustments to each row and each column. + private float[] leastInRow; + private int[] leastInRowIndex; + private float[] rowAdjust; + private float[] colAdjust; + + /** + * Construct a new problem instance with the specified cost matrix. The cost + * matrix must be rectangular, though not necessarily square. If one dimension + * is greater than the other, some elements in the greater dimension will not + * be assigned. The input cost matrix will not be modified. + * @param costMatrix + */ + public MunkresAssignment(float[][] costMatrix) { + // The algorithm assumes that the number of columns is at least as great as + // the number of rows. If this is not the case of the input matrix, then + // all internal structures must be transposed relative to the input. 
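+ // For example, a 5x2 input matrix is stored internally as a 2x5 matrix, and solve() maps the result back onto the 5 input rows.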
+ this.transposed = costMatrix.length > costMatrix[0].length; + if (this.transposed) { + this.rows = costMatrix[0].length; + this.cols = costMatrix.length; + } else { + this.rows = costMatrix.length; + this.cols = costMatrix[0].length; + } + + cost = new float[rows][cols]; + mask = new byte[rows][cols]; + rowsCovered = new boolean[rows]; + colsCovered = new boolean[cols]; + path = new LinkedList>(); + + leastInRow = new float[rows]; + leastInRowIndex = new int[rows]; + rowAdjust = new float[rows]; + colAdjust = new float[cols]; + + assignments = null; + + // Copy cost matrix. + if (transposed) { + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + cost[r][c] = costMatrix[c][r]; + } + } + } else { + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + cost[r][c] = costMatrix[r][c]; + } + } + } + + // Costs must be finite, otherwise the matrix can get into a bad state where + // no progress can be made. If your use case depends on a distinction + // between costs of MAX_VALUE and POSITIVE_INFINITY, you're doing it wrong. + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + if (cost[r][c] == Float.POSITIVE_INFINITY) { + cost[r][c] = Float.MAX_VALUE; + } + } + } + } + + /** + * Get the optimal assignments. The returned array will have the same number + * of elements as the number of rows in the input cost matrix. Each element + * will indicate which column should be assigned to that row, or -1 if no + * column should be assigned, i.e. if result[i] = j then row i should be + * assigned to column j. Subsequent invocations of this method will simply + * return the same object without additional computation. + * @return an array with the optimal assignments + */ + public int[] solve() { + // If this assignment problem has already been solved, return the known + // solution. + if (assignments != null) { + return assignments; + } + + preliminaries(); + + // Find the optimal assignments. + while (!testIsDone()) { + while (!stepOne()) { + stepThree(); + } + stepTwo(); + } + + // Extract the assignments from the mask matrix. + if (transposed) { + assignments = new int[cols]; + outer: + for (int c = 0; c < cols; c++) { + for (int r = 0; r < rows; r++) { + if (mask[r][c] == STAR) { + assignments[c] = r; + continue outer; + } + } + // There is no assignment for this row of the input/output. + assignments[c] = -1; + } + } else { + assignments = new int[rows]; + outer: + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + if (mask[r][c] == STAR) { + assignments[r] = c; + continue outer; + } + } + } + } + + // Once the solution has been computed, there is no need to keep any of the + // other internal structures. Clear all unnecessary internal references so + // the garbage collector may reclaim that memory. + cost = null; + mask = null; + rowsCovered = null; + colsCovered = null; + path = null; + leastInRow = null; + leastInRowIndex = null; + rowAdjust = null; + colAdjust = null; + + return assignments; + } + + /** + * Corresponds to the "preliminaries" step of the original algorithm. + * Guarantees that the matrix is an equivalent non-negative matrix with at + * least one zero in each row. + */ + private void preliminaries() { + for (int r = 0; r < rows; r++) { + // Find the minimum cost of each row. + float min = Float.POSITIVE_INFINITY; + for (int c = 0; c < cols; c++) { + min = Math.min(min, cost[r][c]); + } + + // Subtract that minimum cost from each element in the row.
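+ // For example, the row [4, 1, 3] has minimum 1 and becomes [3, 0, 2], which guarantees at least one zero in the row.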
+ for (int c = 0; c < cols; c++) { + cost[r][c] -= min; + + // If the element is now zero and there are no zeroes in the same row + // or column which are already starred, then star this one. There + // must be at least one zero because of subtracting the min cost. + if (cost[r][c] == 0 && !rowsCovered[r] && !colsCovered[c]) { + mask[r][c] = STAR; + // Cover this row and column so that no other zeroes in them can be + // starred. + rowsCovered[r] = true; + colsCovered[c] = true; + } + } + } + + // Clear the covered rows and columns. + Arrays.fill(rowsCovered, false); + Arrays.fill(colsCovered, false); + } + + /** + * Test whether the algorithm is done, i.e. we have the optimal assignment. + * This occurs when there is exactly one starred zero in each row. + * @return true if the algorithm is done + */ + private boolean testIsDone() { + // Cover all columns containing a starred zero. There can be at most one + // starred zero per column. Therefore, a covered column has an optimal + // assignment. + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + if (mask[r][c] == STAR) { + colsCovered[c] = true; + } + } + } + + // Count the total number of covered columns. + int coveredCols = 0; + for (int c = 0; c < cols; c++) { + coveredCols += colsCovered[c] ? 1 : 0; + } + + // Apply an row and column adjustments that are pending. + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + cost[r][c] += rowAdjust[r]; + cost[r][c] += colAdjust[c]; + } + } + + // Clear the pending row and column adjustments. + Arrays.fill(rowAdjust, 0); + Arrays.fill(colAdjust, 0); + + // The covers on columns and rows may have been reset, recompute the least + // value for each row. + for (int r = 0; r < rows; r++) { + leastInRow[r] = Float.POSITIVE_INFINITY; + for (int c = 0; c < cols; c++) { + if (!rowsCovered[r] && !colsCovered[c] && cost[r][c] < leastInRow[r]) { + leastInRow[r] = cost[r][c]; + leastInRowIndex[r] = c; + } + } + } + + // If all columns are covered, then we are done. Since there may be more + // columns than rows, we are also done if the number of covered columns is + // at least as great as the number of rows. + return (coveredCols == cols || coveredCols >= rows); + } + + /** + * Corresponds to step 1 of the original algorithm. + * @return false if all zeroes are covered + */ + private boolean stepOne() { + while (true) { + Pair zero = findUncoveredZero(); + if (zero == null) { + // No uncovered zeroes, need to manipulate the cost matrix in step + // three. + return false; + } else { + // Prime the uncovered zero and find a starred zero in the same row. + mask[zero.getFirst()][zero.getSecond()] = PRIME; + Pair star = starInRow(zero.getFirst()); + if (star != null) { + // Cover the row with both the newly primed zero and the starred zero. + // Since this is the only place where zeroes are primed, and we cover + // it here, and rows are only uncovered when primes are erased, then + // there can be at most one primed uncovered zero. + rowsCovered[star.getFirst()] = true; + colsCovered[star.getSecond()] = false; + updateMin(star.getFirst(), star.getSecond()); + } else { + // Will go to step two after, where a path will be constructed, + // starting from the uncovered primed zero (there is only one). Since + // we have already found it, save it as the first node in the path. + path.clear(); + path.offerLast(new Pair(zero.getFirst(), + zero.getSecond())); + return true; + } + } + } + } + + /** + * Corresponds to step 2 of the original algorithm. 
+ */ + private void stepTwo() { + // Construct a path of alternating starred zeroes and primed zeroes, where + // each starred zero is in the same column as the previous primed zero, and + // each primed zero is in the same row as the previous starred zero. The + // path will always end in a primed zero. + while (true) { + Pair star = starInCol(path.getLast().getSecond()); + if (star != null) { + path.offerLast(star); + } else { + break; + } + Pair prime = primeInRow(path.getLast().getFirst()); + path.offerLast(prime); + } + + // Augment path - unmask all starred zeroes and star all primed zeroes. All + // nodes in the path will be either starred or primed zeroes. The set of + // starred zeroes is independent and now one larger than before. + for (Pair p : path) { + if (mask[p.getFirst()][p.getSecond()] == STAR) { + mask[p.getFirst()][p.getSecond()] = NONE; + } else { + mask[p.getFirst()][p.getSecond()] = STAR; + } + } + + // Clear all covers from rows and columns. + Arrays.fill(rowsCovered, false); + Arrays.fill(colsCovered, false); + + // Remove the prime mask from all primed zeroes. + for (int r = 0; r < rows; r++) { + for (int c = 0; c < cols; c++) { + if (mask[r][c] == PRIME) { + mask[r][c] = NONE; + } + } + } + } + + /** + * Corresponds to step 3 of the original algorithm. + */ + private void stepThree() { + // Find the minimum uncovered cost. + float min = leastInRow[0]; + for (int r = 1; r < rows; r++) { + if (leastInRow[r] < min) { + min = leastInRow[r]; + } + } + + // Add the minimum cost to each of the costs in a covered row, or subtract + // the minimum cost from each of the costs in an uncovered column. As an + // optimization, do not actually modify the cost matrix yet, but track the + // adjustments that need to be made to each row and column. + for (int r = 0; r < rows; r++) { + if (rowsCovered[r]) { + rowAdjust[r] += min; + } + } + for (int c = 0; c < cols; c++) { + if (!colsCovered[c]) { + colAdjust[c] -= min; + } + } + + // Since the cost matrix is not being updated yet, the minimum uncovered + // cost per row must be updated. + for (int r = 0; r < rows; r++) { + if (!colsCovered[leastInRowIndex[r]]) { + // The least value in this row was in an uncovered column, meaning that + // it would have had the minimum value subtracted from it, and therefore + // will still be the minimum value in that row. + leastInRow[r] -= min; + } else { + // The least value in this row was in a covered column and would not + // have had the minimum value subtracted from it, so the minimum value + // could be some in another column. + for (int c = 0; c < cols; c++) { + if (cost[r][c] + colAdjust[c] + rowAdjust[r] < leastInRow[r]) { + leastInRow[r] = cost[r][c] + colAdjust[c] + rowAdjust[r]; + leastInRowIndex[r] = c; + } + } + } + } + } + + /** + * Find a zero cost assignment which is not covered. If there are no zero cost + * assignments which are uncovered, then null will be returned. + * @return pair of row and column indices of an uncovered zero or null + */ + private Pair findUncoveredZero() { + for (int r = 0; r < rows; r++) { + if (leastInRow[r] == 0) { + return new Pair(r, leastInRowIndex[r]); + } + } + return null; + } + + /** + * A specified row has become covered, and a specified column has become + * uncovered. The least value per row may need to be updated. 
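+ * Maintaining the cached leastInRow and leastInRowIndex values here avoids rescanning the whole cost matrix after each cover change.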
+ * @param row the index of the row which was just covered + * @param col the index of the column which was just uncovered + */ + private void updateMin(int row, int col) { + // If the row is covered we want to ignore it as far as least values go. + leastInRow[row] = Float.POSITIVE_INFINITY; + + for (int r = 0; r < rows; r++) { + // Since the column has only just been uncovered, it could not have any + // pending adjustments. Only covered rows can have pending adjustments + // and covered costs do not count toward row minimums. Therefore, we do + // not need to consider rowAdjust[r] or colAdjust[col]. + if (!rowsCovered[r] && cost[r][col] < leastInRow[r]) { + leastInRow[r] = cost[r][col]; + leastInRowIndex[r] = col; + } + } + } + + /** + * Find a starred zero in a specified row. If there are no starred zeroes in + * the specified row, then null will be returned. + * @param r the index of the row to be searched + * @return pair of row and column indices of starred zero or null + */ + private Pair starInRow(int r) { + for (int c = 0; c < cols; c++) { + if (mask[r][c] == STAR) { + return new Pair(r, c); + } + } + return null; + } + + /** + * Find a starred zero in the specified column. If there are no starred zeroes + * in the specified row, then null will be returned. + * @param c the index of the column to be searched + * @return pair of row and column indices of starred zero or null + */ + private Pair starInCol(int c) { + for (int r = 0; r < rows; r++) { + if (mask[r][c] == STAR) { + return new Pair(r, c); + } + } + return null; + } + + /** + * Find a primed zero in the specified row. If there are no primed zeroes in + * the specified row, then null will be returned. + * @param r the index of the row to be searched + * @return pair of row and column indices of primed zero or null + */ + private Pair primeInRow(int r) { + for (int c = 0; c < cols; c++) { + if (mask[r][c] == PRIME) { + return new Pair(r, c); + } + } + return null; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index ebc5ead..a4003ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; @@ -556,4 +557,11 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { // TODO Auto-generated method stub return null; } + + @Override + public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller, + OpenRegionRequest request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } }
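
Illustrative usage note (not part of the patch itself): the snippet below is a minimal sketch of how the MunkresAssignment class added above can be exercised on a small cost matrix. The matrix values, the expected result, and the class name MunkresAssignmentExample are assumptions made up for illustration only.

package org.apache.hadoop.hbase.util;

// Minimal sketch exercising the MunkresAssignment class introduced in this patch.
public class MunkresAssignmentExample {
  public static void main(String[] args) {
    // cost[r][c] is the cost of assigning row r (e.g. a region) to column c (e.g. a server).
    float[][] cost = {
        {4f, 1f, 3f},
        {2f, 0f, 5f},
        {3f, 2f, 2f},
    };
    int[] assignment = new MunkresAssignment(cost).solve();
    // assignment[r] is the column chosen for row r, or -1 if the row is left unassigned.
    for (int r = 0; r < assignment.length; r++) {
      System.out.println("row " + r + " -> col " + assignment[r]);
    }
    // For this matrix the minimal total cost is 5, so the expected assignment is 0->1, 1->0, 2->2.
  }
}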