diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMobCompactionStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMobCompactionStatusProtos.java
new file mode 100644
index 0000000..bf0260f
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMobCompactionStatusProtos.java
@@ -0,0 +1,2794 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: MasterMobCompactionStatus.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class MasterMobCompactionStatusProtos {
+ private MasterMobCompactionStatusProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface GetMobCompactionRegionsRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .hbase.pb.TableName table_name = 1;
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ boolean hasTableName();
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // required .hbase.pb.ServerName server_name = 2;
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ boolean hasServerName();
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.GetMobCompactionRegionsRequest}
+ */
+ public static final class GetMobCompactionRegionsRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements GetMobCompactionRegionsRequestOrBuilder {
+ // Use GetMobCompactionRegionsRequest.newBuilder() to construct.
+ private GetMobCompactionRegionsRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private GetMobCompactionRegionsRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final GetMobCompactionRegionsRequest defaultInstance;
+ public static GetMobCompactionRegionsRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public GetMobCompactionRegionsRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private GetMobCompactionRegionsRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = serverName_.toBuilder();
+ }
+ serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(serverName_);
+ serverName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_GetMobCompactionRegionsRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_GetMobCompactionRegionsRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<GetMobCompactionRegionsRequest> PARSER =
+ new com.google.protobuf.AbstractParser<GetMobCompactionRegionsRequest>() {
+ public GetMobCompactionRegionsRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new GetMobCompactionRegionsRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<GetMobCompactionRegionsRequest> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .hbase.pb.TableName table_name = 1;
+ public static final int TABLE_NAME_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // required .hbase.pb.ServerName server_name = 2;
+ public static final int SERVER_NAME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_;
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public boolean hasServerName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
+ return serverName_;
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
+ return serverName_;
+ }
+
+ private void initFields() {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasServerName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getServerName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, tableName_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, serverName_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, tableName_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, serverName_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest) obj;
+
+ boolean result = true;
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && (hasServerName() == other.hasServerName());
+ if (hasServerName()) {
+ result = result && getServerName()
+ .equals(other.getServerName());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (hasServerName()) {
+ hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getServerName().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.GetMobCompactionRegionsRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_GetMobCompactionRegionsRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_GetMobCompactionRegionsRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getTableNameFieldBuilder();
+ getServerNameFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (serverNameBuilder_ == null) {
+ serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+ } else {
+ serverNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_GetMobCompactionRegionsRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (serverNameBuilder_ == null) {
+ result.serverName_ = serverName_;
+ } else {
+ result.serverName_ = serverNameBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest.getDefaultInstance()) return this;
+ if (other.hasTableName()) {
+ mergeTableName(other.getTableName());
+ }
+ if (other.hasServerName()) {
+ mergeServerName(other.getServerName());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasTableName()) {
+
+ return false;
+ }
+ if (!hasServerName()) {
+
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+
+ return false;
+ }
+ if (!getServerName().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .hbase.pb.TableName table_name = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ if (tableNameBuilder_ == null) {
+ return tableName_;
+ } else {
+ return tableNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableName_ = value;
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public Builder setTableName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ tableName_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ } else {
+ tableName_ = value;
+ }
+ onChanged();
+ } else {
+ tableNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getTableNameFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilder();
+ } else {
+ return tableName_;
+ }
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableName_,
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ // required .hbase.pb.ServerName server_name = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public boolean hasServerName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
+ if (serverNameBuilder_ == null) {
+ return serverName_;
+ } else {
+ return serverNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serverNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ serverName_ = value;
+ onChanged();
+ } else {
+ serverNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public Builder setServerName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+ if (serverNameBuilder_ == null) {
+ serverName_ = builderForValue.build();
+ onChanged();
+ } else {
+ serverNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serverNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
+ serverName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial();
+ } else {
+ serverName_ = value;
+ }
+ onChanged();
+ } else {
+ serverNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public Builder clearServerName() {
+ if (serverNameBuilder_ == null) {
+ serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+ onChanged();
+ } else {
+ serverNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getServerNameFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
+ if (serverNameBuilder_ != null) {
+ return serverNameBuilder_.getMessageOrBuilder();
+ } else {
+ return serverName_;
+ }
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServerNameFieldBuilder() {
+ if (serverNameBuilder_ == null) {
+ serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
+ serverName_,
+ getParentForChildren(),
+ isClean());
+ serverName_ = null;
+ }
+ return serverNameBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.GetMobCompactionRegionsRequest)
+ }
+
+ static {
+ defaultInstance = new GetMobCompactionRegionsRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.GetMobCompactionRegionsRequest)
+ }
+
+ public interface GetMobCompactionRegionsResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated bytes region_start_key = 1;
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ java.util.List<com.google.protobuf.ByteString> getRegionStartKeyList();
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ int getRegionStartKeyCount();
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ com.google.protobuf.ByteString getRegionStartKey(int index);
+ }
+ /**
+ * Protobuf type {@code hbase.pb.GetMobCompactionRegionsResponse}
+ */
+ public static final class GetMobCompactionRegionsResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements GetMobCompactionRegionsResponseOrBuilder {
+ // Use GetMobCompactionRegionsResponse.newBuilder() to construct.
+ private GetMobCompactionRegionsResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private GetMobCompactionRegionsResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final GetMobCompactionRegionsResponse defaultInstance;
+ public static GetMobCompactionRegionsResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public GetMobCompactionRegionsResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private GetMobCompactionRegionsResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ regionStartKey_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ regionStartKey_.add(input.readBytes());
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ regionStartKey_ = java.util.Collections.unmodifiableList(regionStartKey_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_GetMobCompactionRegionsResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_GetMobCompactionRegionsResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<GetMobCompactionRegionsResponse> PARSER =
+ new com.google.protobuf.AbstractParser<GetMobCompactionRegionsResponse>() {
+ public GetMobCompactionRegionsResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new GetMobCompactionRegionsResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<GetMobCompactionRegionsResponse> getParserForType() {
+ return PARSER;
+ }
+
+ // repeated bytes region_start_key = 1;
+ public static final int REGION_START_KEY_FIELD_NUMBER = 1;
+ private java.util.List<com.google.protobuf.ByteString> regionStartKey_;
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ public java.util.List<com.google.protobuf.ByteString>
+ getRegionStartKeyList() {
+ return regionStartKey_;
+ }
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ public int getRegionStartKeyCount() {
+ return regionStartKey_.size();
+ }
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ public com.google.protobuf.ByteString getRegionStartKey(int index) {
+ return regionStartKey_.get(index);
+ }
+
+ private void initFields() {
+ regionStartKey_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < regionStartKey_.size(); i++) {
+ output.writeBytes(1, regionStartKey_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ {
+ int dataSize = 0;
+ for (int i = 0; i < regionStartKey_.size(); i++) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeBytesSizeNoTag(regionStartKey_.get(i));
+ }
+ size += dataSize;
+ size += 1 * getRegionStartKeyList().size();
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse) obj;
+
+ boolean result = true;
+ result = result && getRegionStartKeyList()
+ .equals(other.getRegionStartKeyList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (getRegionStartKeyCount() > 0) {
+ hash = (37 * hash) + REGION_START_KEY_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionStartKeyList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.GetMobCompactionRegionsResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_GetMobCompactionRegionsResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_GetMobCompactionRegionsResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ regionStartKey_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_GetMobCompactionRegionsResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse(this);
+ int from_bitField0_ = bitField0_;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ regionStartKey_ = java.util.Collections.unmodifiableList(regionStartKey_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.regionStartKey_ = regionStartKey_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.getDefaultInstance()) return this;
+ if (!other.regionStartKey_.isEmpty()) {
+ if (regionStartKey_.isEmpty()) {
+ regionStartKey_ = other.regionStartKey_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureRegionStartKeyIsMutable();
+ regionStartKey_.addAll(other.regionStartKey_);
+ }
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // repeated bytes region_start_key = 1;
+ private java.util.List<com.google.protobuf.ByteString> regionStartKey_ = java.util.Collections.emptyList();
+ private void ensureRegionStartKeyIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ regionStartKey_ = new java.util.ArrayList<com.google.protobuf.ByteString>(regionStartKey_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ public java.util.List<com.google.protobuf.ByteString>
+ getRegionStartKeyList() {
+ return java.util.Collections.unmodifiableList(regionStartKey_);
+ }
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ public int getRegionStartKeyCount() {
+ return regionStartKey_.size();
+ }
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ public com.google.protobuf.ByteString getRegionStartKey(int index) {
+ return regionStartKey_.get(index);
+ }
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ public Builder setRegionStartKey(
+ int index, com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionStartKeyIsMutable();
+ regionStartKey_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ public Builder addRegionStartKey(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionStartKeyIsMutable();
+ regionStartKey_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ public Builder addAllRegionStartKey(
+ java.lang.Iterable<? extends com.google.protobuf.ByteString> values) {
+ ensureRegionStartKeyIsMutable();
+ super.addAll(values, regionStartKey_);
+ onChanged();
+ return this;
+ }
+ /**
+ * repeated bytes region_start_key = 1;
+ */
+ public Builder clearRegionStartKey() {
+ regionStartKey_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.GetMobCompactionRegionsResponse)
+ }
+
+ static {
+ defaultInstance = new GetMobCompactionRegionsResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.GetMobCompactionRegionsResponse)
+ }
+
+ public interface UpdateMobCompactionAsMajorRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .hbase.pb.TableName table_name = 1;
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ boolean hasTableName();
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // required .hbase.pb.ServerName server_name = 2;
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ boolean hasServerName();
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.UpdateMobCompactionAsMajorRequest}
+ */
+ public static final class UpdateMobCompactionAsMajorRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements UpdateMobCompactionAsMajorRequestOrBuilder {
+ // Use UpdateMobCompactionAsMajorRequest.newBuilder() to construct.
+ private UpdateMobCompactionAsMajorRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private UpdateMobCompactionAsMajorRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final UpdateMobCompactionAsMajorRequest defaultInstance;
+ public static UpdateMobCompactionAsMajorRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public UpdateMobCompactionAsMajorRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private UpdateMobCompactionAsMajorRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = serverName_.toBuilder();
+ }
+ serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(serverName_);
+ serverName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_UpdateMobCompactionAsMajorRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_UpdateMobCompactionAsMajorRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<UpdateMobCompactionAsMajorRequest> PARSER =
+ new com.google.protobuf.AbstractParser<UpdateMobCompactionAsMajorRequest>() {
+ public UpdateMobCompactionAsMajorRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new UpdateMobCompactionAsMajorRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<UpdateMobCompactionAsMajorRequest> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .hbase.pb.TableName table_name = 1;
+ public static final int TABLE_NAME_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // required .hbase.pb.ServerName server_name = 2;
+ public static final int SERVER_NAME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_;
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public boolean hasServerName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
+ return serverName_;
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
+ return serverName_;
+ }
+
+ private void initFields() {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasServerName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getServerName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, tableName_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, serverName_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, tableName_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, serverName_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest) obj;
+
+ boolean result = true;
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && (hasServerName() == other.hasServerName());
+ if (hasServerName()) {
+ result = result && getServerName()
+ .equals(other.getServerName());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (hasServerName()) {
+ hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getServerName().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.UpdateMobCompactionAsMajorRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_UpdateMobCompactionAsMajorRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_UpdateMobCompactionAsMajorRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getTableNameFieldBuilder();
+ getServerNameFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (serverNameBuilder_ == null) {
+ serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+ } else {
+ serverNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_UpdateMobCompactionAsMajorRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (serverNameBuilder_ == null) {
+ result.serverName_ = serverName_;
+ } else {
+ result.serverName_ = serverNameBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest.getDefaultInstance()) return this;
+ if (other.hasTableName()) {
+ mergeTableName(other.getTableName());
+ }
+ if (other.hasServerName()) {
+ mergeServerName(other.getServerName());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasTableName()) {
+
+ return false;
+ }
+ if (!hasServerName()) {
+
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+
+ return false;
+ }
+ if (!getServerName().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .hbase.pb.TableName table_name = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ if (tableNameBuilder_ == null) {
+ return tableName_;
+ } else {
+ return tableNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableName_ = value;
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public Builder setTableName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ tableName_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ } else {
+ tableName_ = value;
+ }
+ onChanged();
+ } else {
+ tableNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getTableNameFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilder();
+ } else {
+ return tableName_;
+ }
+ }
+ /**
+ * required .hbase.pb.TableName table_name = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableName_,
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ // required .hbase.pb.ServerName server_name = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public boolean hasServerName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
+ if (serverNameBuilder_ == null) {
+ return serverName_;
+ } else {
+ return serverNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serverNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ serverName_ = value;
+ onChanged();
+ } else {
+ serverNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public Builder setServerName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+ if (serverNameBuilder_ == null) {
+ serverName_ = builderForValue.build();
+ onChanged();
+ } else {
+ serverNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serverNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
+ serverName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial();
+ } else {
+ serverName_ = value;
+ }
+ onChanged();
+ } else {
+ serverNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public Builder clearServerName() {
+ if (serverNameBuilder_ == null) {
+ serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+ onChanged();
+ } else {
+ serverNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getServerNameFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
+ if (serverNameBuilder_ != null) {
+ return serverNameBuilder_.getMessageOrBuilder();
+ } else {
+ return serverName_;
+ }
+ }
+ /**
+ * required .hbase.pb.ServerName server_name = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServerNameFieldBuilder() {
+ if (serverNameBuilder_ == null) {
+ serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
+ serverName_,
+ getParentForChildren(),
+ isClean());
+ serverName_ = null;
+ }
+ return serverNameBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.UpdateMobCompactionAsMajorRequest)
+ }
+
+ static {
+ defaultInstance = new UpdateMobCompactionAsMajorRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.UpdateMobCompactionAsMajorRequest)
+ }
+
+ public interface UpdateMobCompactionAsMajorResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ /**
+ * Protobuf type {@code hbase.pb.UpdateMobCompactionAsMajorResponse}
+ */
+ public static final class UpdateMobCompactionAsMajorResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements UpdateMobCompactionAsMajorResponseOrBuilder {
+ // Use UpdateMobCompactionAsMajorResponse.newBuilder() to construct.
+ private UpdateMobCompactionAsMajorResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private UpdateMobCompactionAsMajorResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final UpdateMobCompactionAsMajorResponse defaultInstance;
+ public static UpdateMobCompactionAsMajorResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public UpdateMobCompactionAsMajorResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private UpdateMobCompactionAsMajorResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_UpdateMobCompactionAsMajorResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_UpdateMobCompactionAsMajorResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<UpdateMobCompactionAsMajorResponse> PARSER =
+ new com.google.protobuf.AbstractParser<UpdateMobCompactionAsMajorResponse>() {
+ public UpdateMobCompactionAsMajorResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new UpdateMobCompactionAsMajorResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<UpdateMobCompactionAsMajorResponse> getParserForType() {
+ return PARSER;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.UpdateMobCompactionAsMajorResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_UpdateMobCompactionAsMajorResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_UpdateMobCompactionAsMajorResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.internal_static_hbase_pb_UpdateMobCompactionAsMajorResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.UpdateMobCompactionAsMajorResponse)
+ }
+
+ static {
+ defaultInstance = new UpdateMobCompactionAsMajorResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.UpdateMobCompactionAsMajorResponse)
+ }
+
+ /**
+ * Protobuf service {@code hbase.pb.MasterMobCompactionStatusService}
+ */
+ public static abstract class MasterMobCompactionStatusService
+ implements com.google.protobuf.Service {
+ protected MasterMobCompactionStatusService() {}
+
+ public interface Interface {
+ /**
+ * rpc GetMobCompactionRegions(.hbase.pb.GetMobCompactionRegionsRequest) returns (.hbase.pb.GetMobCompactionRegionsResponse);
+ */
+ public abstract void getMobCompactionRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse> done);
+
+ /**
+ * rpc UpdateMobCompactionAsMajor(.hbase.pb.UpdateMobCompactionAsMajorRequest) returns (.hbase.pb.UpdateMobCompactionAsMajorResponse);
+ */
+ public abstract void updateMobCompactionAsMajor(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse> done);
+
+ }
+
+ public static com.google.protobuf.Service newReflectiveService(
+ final Interface impl) {
+ return new MasterMobCompactionStatusService() {
+ @java.lang.Override
+ public void getMobCompactionRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse> done) {
+ impl.getMobCompactionRegions(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void updateMobCompactionAsMajor(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse> done) {
+ impl.updateMobCompactionAsMajor(controller, request, done);
+ }
+
+ };
+ }
+
+ public static com.google.protobuf.BlockingService
+ newReflectiveBlockingService(final BlockingInterface impl) {
+ return new com.google.protobuf.BlockingService() {
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final com.google.protobuf.Message callBlockingMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request)
+ throws com.google.protobuf.ServiceException {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callBlockingMethod() given method descriptor for " +
+ "wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return impl.getMobCompactionRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest)request);
+ case 1:
+ return impl.updateMobCompactionAsMajor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest)request);
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ };
+ }
+
+ /**
+ * rpc GetMobCompactionRegions(.hbase.pb.GetMobCompactionRegionsRequest) returns (.hbase.pb.GetMobCompactionRegionsResponse);
+ */
+ public abstract void getMobCompactionRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse> done);
+
+ /**
+ * rpc UpdateMobCompactionAsMajor(.hbase.pb.UpdateMobCompactionAsMajorRequest) returns (.hbase.pb.UpdateMobCompactionAsMajorResponse);
+ */
+ public abstract void updateMobCompactionAsMajor(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse> done);
+
+ public static final
+ com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.getDescriptor().getServices().get(0);
+ }
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final void callMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request,
+ com.google.protobuf.RpcCallback<
+ com.google.protobuf.Message> done) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callMethod() given method descriptor for wrong " +
+ "service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ this.getMobCompactionRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
+ case 1:
+ this.updateMobCompactionAsMajor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public static Stub newStub(
+ com.google.protobuf.RpcChannel channel) {
+ return new Stub(channel);
+ }
+
+ public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.MasterMobCompactionStatusService implements Interface {
+ private Stub(com.google.protobuf.RpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.RpcChannel channel;
+
+ public com.google.protobuf.RpcChannel getChannel() {
+ return channel;
+ }
+
+ public void getMobCompactionRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.getDefaultInstance()));
+ }
+
+ public void updateMobCompactionAsMajor(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(1),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.getDefaultInstance()));
+ }
+ }
+
+ public static BlockingInterface newBlockingStub(
+ com.google.protobuf.BlockingRpcChannel channel) {
+ return new BlockingStub(channel);
+ }
+
+ public interface BlockingInterface {
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse getMobCompactionRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse updateMobCompactionAsMajor(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest request)
+ throws com.google.protobuf.ServiceException;
+ }
+
+ private static final class BlockingStub implements BlockingInterface {
+ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.BlockingRpcChannel channel;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse getMobCompactionRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse updateMobCompactionAsMajor(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(1),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse.getDefaultInstance());
+ }
+
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.MasterMobCompactionStatusService)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_GetMobCompactionRegionsRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_GetMobCompactionRegionsRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_GetMobCompactionRegionsResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_GetMobCompactionRegionsResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_UpdateMobCompactionAsMajorRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_UpdateMobCompactionAsMajorRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_UpdateMobCompactionAsMajorResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_UpdateMobCompactionAsMajorResponse_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\037MasterMobCompactionStatus.proto\022\010hbase" +
+ ".pb\032\013HBase.proto\"t\n\036GetMobCompactionRegi" +
+ "onsRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.p" +
+ "b.TableName\022)\n\013server_name\030\002 \002(\0132\024.hbase" +
+ ".pb.ServerName\";\n\037GetMobCompactionRegion" +
+ "sResponse\022\030\n\020region_start_key\030\001 \003(\014\"w\n!U" +
+ "pdateMobCompactionAsMajorRequest\022\'\n\ntabl" +
+ "e_name\030\001 \002(\0132\023.hbase.pb.TableName\022)\n\013ser" +
+ "ver_name\030\002 \002(\0132\024.hbase.pb.ServerName\"$\n\"" +
+ "UpdateMobCompactionAsMajorResponse2\213\002\n M",
+ "asterMobCompactionStatusService\022n\n\027GetMo" +
+ "bCompactionRegions\022(.hbase.pb.GetMobComp" +
+ "actionRegionsRequest\032).hbase.pb.GetMobCo" +
+ "mpactionRegionsResponse\022w\n\032UpdateMobComp" +
+ "actionAsMajor\022+.hbase.pb.UpdateMobCompac" +
+ "tionAsMajorRequest\032,.hbase.pb.UpdateMobC" +
+ "ompactionAsMajorResponseBU\n*org.apache.h" +
+ "adoop.hbase.protobuf.generatedB\037MasterMo" +
+ "bCompactionStatusProtosH\001\210\001\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_hbase_pb_GetMobCompactionRegionsRequest_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_hbase_pb_GetMobCompactionRegionsRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_GetMobCompactionRegionsRequest_descriptor,
+ new java.lang.String[] { "TableName", "ServerName", });
+ internal_static_hbase_pb_GetMobCompactionRegionsResponse_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_hbase_pb_GetMobCompactionRegionsResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_GetMobCompactionRegionsResponse_descriptor,
+ new java.lang.String[] { "RegionStartKey", });
+ internal_static_hbase_pb_UpdateMobCompactionAsMajorRequest_descriptor =
+ getDescriptor().getMessageTypes().get(2);
+ internal_static_hbase_pb_UpdateMobCompactionAsMajorRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_UpdateMobCompactionAsMajorRequest_descriptor,
+ new java.lang.String[] { "TableName", "ServerName", });
+ internal_static_hbase_pb_UpdateMobCompactionAsMajorResponse_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_hbase_pb_UpdateMobCompactionAsMajorResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_UpdateMobCompactionAsMajorResponse_descriptor,
+ new java.lang.String[] { });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/hbase-protocol/src/main/protobuf/MasterMobCompactionStatus.proto b/hbase-protocol/src/main/protobuf/MasterMobCompactionStatus.proto
new file mode 100644
index 0000000..970e35d
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/MasterMobCompactionStatus.proto
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "MasterMobCompactionStatusProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+
+message GetMobCompactionRegionsRequest {
+ required TableName table_name = 1;
+ required ServerName server_name = 2;
+}
+
+message GetMobCompactionRegionsResponse {
+ repeated bytes region_start_key = 1;
+}
+
+message UpdateMobCompactionAsMajorRequest {
+ required TableName table_name = 1;
+ required ServerName server_name = 2;
+}
+
+message UpdateMobCompactionAsMajorResponse {
+}
+
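+// Summary of the intended semantics, as implemented by
+// HMasterMobCompactionStatusService elsewhere in this patch:
+//  - GetMobCompactionRegions returns the start keys of the regions of the
+//    given table that are under MOB compaction on the given server.
+//  - UpdateMobCompactionAsMajor marks the MOB compaction of the given table
+//    on the given server as a major compaction.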
+service MasterMobCompactionStatusService {
+ rpc GetMobCompactionRegions(GetMobCompactionRegionsRequest)
+ returns(GetMobCompactionRegionsResponse);
+
+ rpc UpdateMobCompactionAsMajor(UpdateMobCompactionAsMajorRequest)
+ returns(UpdateMobCompactionAsMajorResponse);
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
index 5128662..1875196 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
@@ -218,7 +218,7 @@ public static boolean isHFileLink(String fileName) {
* @return Relative table path
* @throws IOException on unexpected error.
*/
- private static Path getHFileLinkPatternRelativePath(final Path path) {
+ public static Path getHFileLinkPatternRelativePath(final Path path) {
// table=region-hfile
Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(path.getName());
if (!m.matches()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index f8d0003..d5d0164 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -43,6 +43,7 @@
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
+import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
@@ -311,7 +312,7 @@ public void run() {
private HFileCleaner hfileCleaner;
private ExpiredMobFileCleanerChore expiredMobFileCleanerChore;
private MobCompactionChore mobCompactChore;
- private MasterMobCompactionThread mobCompactThread;
+ private MobCompactionManager mobCompactionManager;
// used to synchronize the mobCompactionStates
private final IdLock mobCompactionLock = new IdLock();
// save the information of mob compactions in tables.
@@ -621,6 +622,9 @@ void initializeZKBasedSystemTrackers() throws IOException,
this.mpmHost = new MasterProcedureManagerHost();
this.mpmHost.register(this.snapshotManager);
this.mpmHost.register(new MasterFlushTableProcedureManager());
+ this.mobCompactionManager = new MobCompactionManager(this);
+ this.mpmHost.register(this.mobCompactionManager);
+ this.registerService(new HMasterMobCompactionStatusService(this));
this.mpmHost.loadProcedures(conf);
this.mpmHost.initialize(this, this.metricsMaster);
@@ -848,7 +852,6 @@ private void finishActiveMasterInitialization(MonitoredTask status)
LOG
.info("The period is " + mobCompactionPeriod + " seconds, MobCompactionChore is disabled");
}
- this.mobCompactThread = new MasterMobCompactionThread(this);
if (this.cpHost != null) {
// don't let cp initialization errors kill the master
@@ -1199,9 +1202,6 @@ private void stopChores() {
if (this.clusterStatusPublisherChore != null){
clusterStatusPublisherChore.cancel(true);
}
- if (this.mobCompactThread != null) {
- this.mobCompactThread.close();
- }
}
/**
@@ -2801,11 +2801,11 @@ public void reportMobCompactionEnd(TableName tableName) throws IOException {
* @param tableName The table the compact.
* @param columns The compacted columns.
* @param allFiles Whether add all mob files into the compaction.
+ * @return The result of a mob compaction.
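+ *         Callers that need to block until the request completes can, for example,
+ *         wait on the returned future: {@code requestMobCompaction(tn, cols, true).get()}.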
*/
- public void requestMobCompaction(TableName tableName,
- List<HColumnDescriptor> columns, boolean allFiles) throws IOException {
- mobCompactThread.requestMobCompaction(conf, fs, tableName, columns,
- tableLockManager, allFiles);
+ public Future requestMobCompaction(TableName tableName, List<HColumnDescriptor> columns,
+ boolean allFiles) throws IOException {
+ return mobCompactionManager.requestMobCompaction(tableName, columns, allFiles);
}
/**
@@ -2866,4 +2866,9 @@ public SplitOrMergeTracker getSplitOrMergeTracker() {
public LoadBalancer getLoadBalancer() {
return balancer;
}
+
+ @Override
+ public MobCompactionManager getMobCompactionManager() {
+ return mobCompactionManager;
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterMobCompactionStatusService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterMobCompactionStatusService.java
new file mode 100644
index 0000000..95f5b4d
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterMobCompactionStatusService.java
@@ -0,0 +1,81 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.MasterMobCompactionStatusService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorResponse;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+
+/**
+ * An HMaster service that provides the MOB compaction status information.
+ */
+@InterfaceAudience.Private
+public class HMasterMobCompactionStatusService extends MasterMobCompactionStatusService {
+
+ private MasterServices master;
+
+ public HMasterMobCompactionStatusService(MasterServices master) {
+ this.master = master;
+ }
+
+ /**
+ * Gets the start keys of the compacted regions.
+ */
+ @Override
+ public void getMobCompactionRegions(RpcController controller,
+ GetMobCompactionRegionsRequest request, RpcCallback<GetMobCompactionRegionsResponse> done) {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tnPb = request.getTableName();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName snPb = request
+ .getServerName();
+ List<byte[]> regionStartKeys = master.getMobCompactionManager().getCompactingRegions(
+ ProtobufUtil.toTableName(tnPb), ProtobufUtil.toServerName(snPb));
+ GetMobCompactionRegionsResponse.Builder builder = GetMobCompactionRegionsResponse.newBuilder();
+ if (!regionStartKeys.isEmpty()) {
+ for (byte[] startKey : regionStartKeys) {
+ builder.addRegionStartKey(ByteString.copyFrom(startKey));
+ }
+ }
+ done.run(builder.build());
+ }
+
+ /**
+ * Updates the MOB compaction as major in the given server.
+ */
+ @Override
+ public void updateMobCompactionAsMajor(RpcController controller,
+ UpdateMobCompactionAsMajorRequest request,
+ RpcCallback<UpdateMobCompactionAsMajorResponse> done) {
+ TableName tableName = ProtobufUtil.toTableName(request.getTableName());
+ ServerName serverName = ProtobufUtil.toServerName(request.getServerName());
+ master.getMobCompactionManager().updateAsMajorCompaction(tableName, serverName);
+ done.run(UpdateMobCompactionAsMajorResponse.getDefaultInstance());
+ }
+}
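
A client or region server would reach this endpoint through the master coprocessor channel and the protobuf-generated stub. A hedged sketch; the channel acquisition via Admin and the blocking-stub style are assumptions, not part of this patch, and error handling is omitted:

  CoprocessorRpcChannel channel = admin.coprocessorService();
  MasterMobCompactionStatusService.BlockingInterface stub =
      MasterMobCompactionStatusService.newBlockingStub(channel);
  GetMobCompactionRegionsRequest request = GetMobCompactionRegionsRequest.newBuilder()
      .setTableName(ProtobufUtil.toProtoTableName(tableName))
      .setServerName(ProtobufUtil.toServerName(serverName))
      .build();
  GetMobCompactionRegionsResponse response = stub.getMobCompactionRegions(null, request);
  // response.getRegionStartKeyList() holds the start keys the master recorded for that server
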
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
deleted file mode 100644
index f8a5c15..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.mob.MobUtils;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-/**
- * The mob compaction thread used in {@link MasterRpcServices}
- */
-@InterfaceAudience.Private
-public class MasterMobCompactionThread {
- static final Log LOG = LogFactory.getLog(MasterMobCompactionThread.class);
- private final HMaster master;
- private final Configuration conf;
- private final ExecutorService mobCompactorPool;
- private final ExecutorService masterMobPool;
-
- public MasterMobCompactionThread(HMaster master) {
- this.master = master;
- this.conf = master.getConfiguration();
- final String n = Thread.currentThread().getName();
- // this pool is used to run the mob compaction
- this.masterMobPool = new ThreadPoolExecutor(1, 2, 60, TimeUnit.SECONDS,
- new SynchronousQueue<Runnable>(), new ThreadFactory() {
- @Override
- public Thread newThread(Runnable r) {
- Thread t = new Thread(r);
- t.setName(n + "-MasterMobCompaction-" + EnvironmentEdgeManager.currentTime());
- return t;
- }
- });
- ((ThreadPoolExecutor) this.masterMobPool).allowCoreThreadTimeOut(true);
- // this pool is used in the mob compaction to compact the mob files by partitions
- // in parallel
- this.mobCompactorPool = MobUtils
- .createMobCompactorThreadPool(master.getConfiguration());
- }
-
- /**
- * Requests mob compaction
- * @param conf The Configuration
- * @param fs The file system
- * @param tableName The table the compact
- * @param columns The column descriptors
- * @param tableLockManager The tableLock manager
- * @param allFiles Whether add all mob files into the compaction.
- */
- public void requestMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
- List<HColumnDescriptor> columns, TableLockManager tableLockManager, boolean allFiles)
- throws IOException {
- master.reportMobCompactionStart(tableName);
- try {
- masterMobPool.execute(new CompactionRunner(fs, tableName, columns, tableLockManager,
- allFiles, mobCompactorPool));
- } catch (RejectedExecutionException e) {
- // in case the request is rejected by the pool
- try {
- master.reportMobCompactionEnd(tableName);
- } catch (IOException e1) {
- LOG.error("Failed to mark end of mob compation", e1);
- }
- throw e;
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug("The mob compaction is requested for the columns " + columns
- + " of the table " + tableName.getNameAsString());
- }
- }
-
- private class CompactionRunner implements Runnable {
- private FileSystem fs;
- private TableName tableName;
- private List<HColumnDescriptor> hcds;
- private TableLockManager tableLockManager;
- private boolean allFiles;
- private ExecutorService pool;
-
- public CompactionRunner(FileSystem fs, TableName tableName, List<HColumnDescriptor> hcds,
- TableLockManager tableLockManager, boolean allFiles, ExecutorService pool) {
- super();
- this.fs = fs;
- this.tableName = tableName;
- this.hcds = hcds;
- this.tableLockManager = tableLockManager;
- this.allFiles = allFiles;
- this.pool = pool;
- }
-
- @Override
- public void run() {
- try {
- for (HColumnDescriptor hcd : hcds) {
- MobUtils.doMobCompaction(conf, fs, tableName, hcd, pool, tableLockManager,
- allFiles);
- }
- } catch (IOException e) {
- LOG.error("Failed to perform the mob compaction", e);
- } finally {
- try {
- master.reportMobCompactionEnd(tableName);
- } catch (IOException e) {
- LOG.error("Failed to mark end of mob compation", e);
- }
- }
- }
- }
-
- /**
- * Only interrupt once it's done with a run through the work loop.
- */
- private void interruptIfNecessary() {
- mobCompactorPool.shutdown();
- masterMobPool.shutdown();
- }
-
- /**
- * Wait for all the threads finish.
- */
- private void join() {
- waitFor(mobCompactorPool, "Mob Compaction Thread");
- waitFor(masterMobPool, "Region Server Mob Compaction Thread");
- }
-
- /**
- * Closes the MasterMobCompactionThread.
- */
- public void close() {
- interruptIfNecessary();
- join();
- }
-
- /**
- * Wait for thread finish.
- * @param t the thread to wait
- * @param name the thread name.
- */
- private void waitFor(ExecutorService t, String name) {
- boolean done = false;
- while (!done) {
- try {
- done = t.awaitTermination(60, TimeUnit.SECONDS);
- LOG.info("Waiting for " + name + " to finish...");
- if (!done) {
- t.shutdownNow();
- }
- } catch (InterruptedException ie) {
- LOG.warn("Interrupted waiting for " + name + " to finish...");
- }
- }
- }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 21f14e8..3e3c7ce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -355,4 +355,9 @@ public boolean abortProcedure(final long procId, final boolean mayInterruptIfRun
* @return load balancer
*/
public LoadBalancer getLoadBalancer();
+
+ /**
+ * @return Master's instance of {@link MobCompactionManager}
+ */
+ MobCompactionManager getMobCompactionManager();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
index 4b956e6..ee441a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
@@ -18,8 +18,10 @@
*/
package org.apache.hadoop.hbase.master;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
-import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
@@ -30,7 +32,6 @@
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.mob.MobUtils;
/**
* The Class MobCompactChore for running compaction regularly to merge small mob files.
@@ -40,15 +41,11 @@
private static final Log LOG = LogFactory.getLog(MobCompactionChore.class);
private HMaster master;
- private TableLockManager tableLockManager;
- private ExecutorService pool;
public MobCompactionChore(HMaster master, int period) {
// use the period as initial delay.
super(master.getServerName() + "-MobCompactionChore", master, period, period, TimeUnit.SECONDS);
this.master = master;
- this.tableLockManager = master.getTableLockManager();
- this.pool = MobUtils.createMobCompactorThreadPool(master.getConfiguration());
}
@Override
@@ -71,8 +68,11 @@ protected void chore() {
master.reportMobCompactionStart(htd.getTableName());
reported = true;
}
- MobUtils.doMobCompaction(master.getConfiguration(), master.getFileSystem(),
- htd.getTableName(), hcd, pool, tableLockManager, false);
+ List<HColumnDescriptor> columns = new ArrayList<HColumnDescriptor>(1);
+ columns.add(hcd);
+ Future<Void> future = master.requestMobCompaction(htd.getTableName(), columns, false);
+ // wait for the end of the mob compaction
+ future.get();
}
} finally {
if (reported) {
@@ -84,10 +84,4 @@ protected void chore() {
LOG.error("Failed to compact mob files", e);
}
}
-
- @Override
- protected void cleanup() {
- super.cleanup();
- pool.shutdown();
- }
}
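
The chore drives compactions directly through requestMobCompaction; an external trigger would instead go through the procedure manager, using the signature and property keys defined in MobCompactionManager below. A hedged sketch via the Admin API, where the column family name "mobFamily" is illustrative:

  Map<String, String> props = new HashMap<String, String>();
  props.put("mobCompaction-column", "mobFamily");  // MOB_COMPACTION_PROCEDURE_COLUMN_KEY
  props.put("mobCompaction-allFiles", "true");     // MOB_COMPACTION_PROCEDURE_ALL_FILES_KEY
  admin.execProcedure("mob-compaction-proc",       // MOB_COMPACTION_PROCEDURE_SIGNATURE
      tableName.getNameAsString(), props);
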
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionManager.java
new file mode 100644
index 0000000..4ff0183
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionManager.java
@@ -0,0 +1,739 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotEnabledException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
+import org.apache.hadoop.hbase.procedure.Procedure;
+import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
+import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
+import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.ScanInfo;
+import org.apache.hadoop.hbase.regionserver.ScanType;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
+import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
+import org.apache.hadoop.hbase.regionserver.StoreScanner;
+import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.zookeeper.KeeperException;
+
+import com.google.common.collect.Lists;
+
+/**
+ * The mob compaction manager used in {@link HMaster}
+ */
+@InterfaceAudience.Private
+public class MobCompactionManager extends MasterProcedureManager implements Stoppable {
+
+ static final Log LOG = LogFactory.getLog(MobCompactionManager.class);
+ private final HMaster master;
+ private final ThreadPoolExecutor mobCompactionPool;
+ private final int delFileMaxCount;
+ private final int compactionBatchSize;
+ protected final int compactionKVMax;
+ private final Path tempPath;
+ private final CacheConfig compactionCacheConfig;
+ private boolean stopped;
+ public static final String MOB_COMPACTION_PROCEDURE_COLUMN_KEY = "mobCompaction-column";
+ public static final String MOB_COMPACTION_PROCEDURE_ALL_FILES_KEY = "mobCompaction-allFiles";
+
+ public static final String MOB_COMPACTION_PROCEDURE_SIGNATURE = "mob-compaction-proc";
+
+ private static final String MOB_COMPACTION_TIMEOUT_MILLIS_KEY =
+ "hbase.master.mob.compaction.timeoutMillis";
+ private static final int MOB_COMPACTION_TIMEOUT_MILLIS_DEFAULT = 1800000; // 30 min
+ private static final String MOB_COMPACTION_WAKE_MILLIS_KEY =
+ "hbase.master.mob.compaction.wakeMillis";
+ private static final int MOB_COMPACTION_WAKE_MILLIS_DEFAULT = 500;
+
+ private static final String MOB_COMPACTION_PROC_POOL_THREADS_KEY =
+ "hbase.master.mob.compaction.procedure.threads";
+ private static final int MOB_COMPACTION_PROC_POOL_THREADS_DEFAULT = 2;
+ private boolean closePoolOnStop = true;
+
+ private ProcedureCoordinator coordinator;
+ private Map<TableName, Future<Void>> compactions = new HashMap<TableName, Future<Void>>();
+ // This records the mappings of tables and mob compactions.
+ // The value of this map is a mapping of server names and pairs of the execution information and
+ // the related online regions.
+ private Map<TableName, Map<ServerName, Pair<Boolean, List<byte[]>>>> compactingRegions =
+ new HashMap<TableName, Map<ServerName, Pair<Boolean, List<byte[]>>>>();
+
+ public MobCompactionManager(HMaster master) {
+ this(master, null);
+ }
+
+ public MobCompactionManager(HMaster master, ThreadPoolExecutor mobCompactionPool) {
+ this.master = master;
+ Configuration conf = master.getConfiguration();
+ delFileMaxCount = conf.getInt(MobConstants.MOB_DELFILE_MAX_COUNT,
+ MobConstants.DEFAULT_MOB_DELFILE_MAX_COUNT);
+ compactionBatchSize = conf.getInt(MobConstants.MOB_COMPACTION_BATCH_SIZE,
+ MobConstants.DEFAULT_MOB_COMPACTION_BATCH_SIZE);
+ compactionKVMax = conf.getInt(HConstants.COMPACTION_KV_MAX,
+ HConstants.COMPACTION_KV_MAX_DEFAULT);
+ tempPath = new Path(MobUtils.getMobHome(conf), MobConstants.TEMP_DIR_NAME);
+ boolean cacheDataOnRead = conf.getBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY,
+ CacheConfig.DEFAULT_CACHE_DATA_ON_READ);
+ Configuration configuration = conf;
+ if (cacheDataOnRead) {
+ Configuration copyOfConf = new Configuration(conf);
+ copyOfConf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, Boolean.FALSE);
+ configuration = copyOfConf;
+ }
+ compactionCacheConfig = new CacheConfig(configuration);
+ // this pool is used to run the mob compaction
+ if (mobCompactionPool != null) {
+ this.mobCompactionPool = mobCompactionPool;
+ closePoolOnStop = false;
+ } else {
+ int threads = conf.getInt(MOB_COMPACTION_PROC_POOL_THREADS_KEY,
+ MOB_COMPACTION_PROC_POOL_THREADS_DEFAULT);
+ this.mobCompactionPool = ProcedureCoordinator.defaultPool("MasterMobCompaction", threads);
+ }
+ }
+
+ /**
+ * Requests mob compaction
+ * @param tableName The table to compact
+ * @param columns The column descriptors
+ * @param allFiles Whether to add all mob files into the compaction.
+ * @return The future of the submitted mob compaction.
+ */
+ public Future<Void> requestMobCompaction(TableName tableName, List<HColumnDescriptor> columns,
+ boolean allFiles) throws IOException {
+ // only compact enabled table
+ if (!master.getAssignmentManager().getTableStateManager()
+ .isTableState(tableName, TableState.State.ENABLED)) {
+ LOG.warn("The table " + tableName + " is not enabled");
+ throw new TableNotEnabledException(tableName);
+ }
+ synchronized (this) {
+ // check if there is another mob compaction for the same table
+ Future<Void> compaction = compactions.get(tableName);
+ if (compaction != null && !compaction.isDone()) {
+ String msg = "Another mob compaction on table " + tableName.getNameAsString()
+ + " is in progress";
+ LOG.error(msg);
+ throw new IOException(msg);
+ }
+ Future<Void> future = mobCompactionPool.submit(new CompactionRunner(tableName, columns,
+ allFiles));
+ compactions.put(tableName, future);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("The mob compaction is requested for the columns " + columns + " of the table "
+ + tableName.getNameAsString());
+ }
+ return future;
+ }
+ }
+
+ /**
+ * Gets the regions that run the mob compaction.
+ * @param tableName The table to run the mob compaction.
+ * @param serverName The server to run the mob compaction.
+ * @return The start keys of regions that run the mob compaction.
+ */
+ public List<byte[]> getCompactingRegions(TableName tableName, ServerName serverName) {
+ Map<ServerName, Pair<Boolean, List<byte[]>>> serverRegionMapping = compactingRegions
+ .get(tableName);
+ if (serverRegionMapping == null) {
+ return Collections.emptyList();
+ }
+ Pair<Boolean, List<byte[]>> pair = serverRegionMapping.get(serverName);
+ if (pair == null) {
+ return Collections.emptyList();
+ }
+ return pair.getSecond();
+ }
+
+ /**
+ * Updates the mob compaction in the given server as major.
+ * @param tableName The table to run the mob compaction.
+ * @param serverName The server to run the mob compaction.
+ */
+ public void updateAsMajorCompaction(TableName tableName, ServerName serverName) {
+ Map<ServerName, Pair<Boolean, List<byte[]>>> serverRegionMapping = compactingRegions
+ .get(tableName);
+ if (serverRegionMapping == null) {
+ return;
+ }
+ Pair<Boolean, List<byte[]>> pair = serverRegionMapping.get(serverName);
+ if (pair == null) {
+ return;
+ }
+ // mark it as true which means the mob compaction in the given server is major.
+ pair.setFirst(Boolean.TRUE);
+ }
+
+ /**
+ * A callable for mob compaction
+ */
+ private class CompactionRunner implements Callable<Void> {
+ private final Configuration conf;
+ private final FileSystem fs;
+ private final TableName tableName;
+ private final List<HColumnDescriptor> columns;
+ private final TableLockManager tableLockManager;
+ private final boolean allFiles;
+
+ public CompactionRunner(TableName tableName, List<HColumnDescriptor> columns,
+ boolean allFiles) {
+ this.conf = master.getConfiguration();
+ this.fs = master.getFileSystem();
+ this.tableName = tableName;
+ this.columns = columns;
+ this.tableLockManager = master.getTableLockManager();
+ this.allFiles = allFiles;
+ }
+
+ @Override
+ public Void call() throws Exception {
+ boolean tableLocked = false;
+ TableLock lock = null;
+ try {
+ master.reportMobCompactionStart(tableName);
+ // acquire lock to sync with major compaction
+ // the tableLockManager might be null in testing. In that case, it is lock-free.
+ if (tableLockManager != null) {
+ lock = tableLockManager.writeLock(MobUtils.getTableLockName(tableName),
+ "Run MOB Compaction");
+ lock.acquire();
+ }
+ tableLocked = true;
+ StringBuilder errorMsg = null;
+ for (HColumnDescriptor column : columns) {
+ try {
+ doCompaction(conf, fs, tableName, column, allFiles);
+ } catch (IOException e) {
+ LOG.error("Failed to compact the mob files for the column " + column.getNameAsString()
+ + " in the table " + tableName.getNameAsString(), e);
+ if (errorMsg == null) {
+ errorMsg = new StringBuilder();
+ }
+ errorMsg.append(column.getNameAsString() + " ");
+ }
+ }
+ if (errorMsg != null) {
+ throw new IOException("Failed to compact the mob files for the columns "
+ + errorMsg.toString() + " in the table " + tableName.getNameAsString());
+ }
+ } catch (IOException e) {
+ LOG.error(
+ "Failed to perform the mob compaction for the table " + tableName.getNameAsString(), e);
+ throw e;
+ } finally {
+ // release lock
+ if (lock != null && tableLocked) {
+ try {
+ lock.release();
+ } catch (IOException e) {
+ LOG.error("Failed to release the write lock of mob compaction for the table "
+ + tableName.getNameAsString(), e);
+ }
+ }
+ // remove this compaction from memory.
+ synchronized (MobCompactionManager.this) {
+ compactions.remove(tableName);
+ }
+ try {
+ master.reportMobCompactionEnd(tableName);
+ } catch (IOException e) {
+ LOG.error(
+ "Failed to mark end of mob compation for the table " + tableName.getNameAsString(), e);
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Performs mob compaction for a column.
+ * @param conf The current configuration.
+ * @param fs The current file system.
+ * @param tableName The current table name.
+ * @param column The current column descriptor.
+ * @param allFiles If a major compaction is required.
+ * @throws IOException
+ */
+ private void doCompaction(Configuration conf, FileSystem fs, TableName tableName,
+ HColumnDescriptor column, boolean allFiles) throws IOException {
+ // merge del files
+ Encryption.Context cryptoContext = EncryptionUtil.createEncryptionContext(conf, column);
+ Path mobTableDir = FSUtils.getTableDir(MobUtils.getMobHome(conf), tableName);
+ Path mobFamilyDir = MobUtils.getMobFamilyPath(conf, tableName, column.getNameAsString());
+ List<Path> delFilePaths = compactDelFiles(column, selectDelFiles(mobFamilyDir),
+ EnvironmentEdgeManager.currentTime(), cryptoContext, mobTableDir, mobFamilyDir);
+ // dispatch mob compaction request to region servers.
+ boolean archiveDelFiles = dispatchMobCompaction(tableName, column, allFiles);
+ // archive the del files if it is a major compaction
+ if (archiveDelFiles && !delFilePaths.isEmpty()) {
+ LOG.info("After a mob compaction with all files selected, archive the del files "
+ + delFilePaths);
+ List<StoreFile> delFiles = new ArrayList<StoreFile>();
+ for (Path delFilePath : delFilePaths) {
+ StoreFile sf = new StoreFile(fs, delFilePath, conf, compactionCacheConfig,
+ BloomType.NONE);
+ delFiles.add(sf);
+ }
+ try {
+ MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), delFiles);
+ } catch (IOException e) {
+ LOG.error("Failed to archive the del files " + delFilePaths, e);
+ }
+ }
+ }
+
+ /**
+ * Submits the procedure to region servers.
+ * @param tableName The current table name.
+ * @param column The current column descriptor.
+ * @param allFiles If a major compaction is required.
+ * @return True if all the mob files were selected in the compaction on every region server.
+ * @throws IOException
+ */
+ private boolean dispatchMobCompaction(TableName tableName, HColumnDescriptor column,
+ boolean allFiles) throws IOException {
+ // Only when all the regions are online while the procedure runs on the region
+ // servers is the compaction considered a major one.
+ // Use procedure to dispatch the compaction to region servers.
+ // Find all the online regions
+ boolean allRegionsOnline = true;
+ List<Pair<HRegionInfo, ServerName>> regionsAndLocations = MetaTableAccessor
+ .getTableRegionsAndLocations(master.getConnection(), tableName, false);
+ Map<String, List<byte[]>> regionServers = new HashMap<String, List<byte[]>>();
+ for (Pair<HRegionInfo, ServerName> region : regionsAndLocations) {
+ if (region != null && region.getFirst() != null && region.getSecond() != null) {
+ HRegionInfo hri = region.getFirst();
+ if (hri.isOffline()) {
+ allRegionsOnline = false;
+ continue;
+ }
+ String serverName = region.getSecond().toString();
+ List<byte[]> regionNames = regionServers.get(serverName);
+ if (regionNames != null) {
+ regionNames.add(hri.getStartKey());
+ } else {
+ regionNames = new ArrayList<byte[]>();
+ regionNames.add(hri.getStartKey());
+ regionServers.put(serverName, regionNames);
+ }
+ }
+ }
+ boolean archiveDelFiles = false;
+ Map<ServerName, Pair<Boolean, List<byte[]>>> serverRegionMapping = Collections.emptyMap();
+ if (allRegionsOnline && !regionServers.isEmpty()) {
+ // record the online regions of each region server
+ serverRegionMapping = new HashMap<ServerName, Pair<Boolean, List<byte[]>>>();
+ compactingRegions.put(tableName, serverRegionMapping);
+ for (Entry<String, List<byte[]>> entry : regionServers.entrySet()) {
+ String serverNameAsString = entry.getKey();
+ ServerName serverName = ServerName.valueOf(serverNameAsString);
+ List<byte[]> startKeysOfOnlineRegions = entry.getValue();
+ Pair<Boolean, List<byte[]>> pair = new Pair<Boolean, List<byte[]>>();
+ serverRegionMapping.put(serverName, pair);
+ pair.setFirst(Boolean.FALSE);
+ pair.setSecond(startKeysOfOnlineRegions);
+ }
+ }
+ // start the procedure
+ String procedureName = MobConstants.MOB_COMPACTION_PREFIX + tableName.getNameAsString();
+ ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(procedureName);
+ // the format of data is allFiles (one byte) + allRegionsOnline (one byte) + columnName
+ byte[] data = new byte[2 + column.getNameAsString().length()];
+ data[0] = (byte) (allFiles ? 1 : 0);
+ data[1] = (byte) (allRegionsOnline ? 1 : 0);
+ Bytes.putBytes(data, 2, column.getName(), 0, column.getName().length);
+ Procedure proc = coordinator.startProcedure(monitor, procedureName, data,
+ Lists.newArrayList(regionServers.keySet()));
+ if (proc == null) {
+ String msg = "Failed to submit distributed procedure for mob compaction '"
+ + procedureName + "'";
+ LOG.error(msg);
+ throw new IOException(msg);
+ }
+ try {
+ // wait for the mob compaction to complete.
+ proc.waitForCompleted();
+ LOG.info("Done waiting - mob compaction for " + procedureName);
+ if (allRegionsOnline && !serverRegionMapping.isEmpty()) {
+ // check if all the files are selected in compaction of all region servers.
+ for (Entry<ServerName, Pair<Boolean, List<byte[]>>> entry : serverRegionMapping
+ .entrySet()) {
+ boolean major = entry.getValue().getFirst();
+ LOG.info("Mob compaction " + procedureName + " in server " + entry.getKey() + " is "
+ + (major ? "major" : "minor"));
+ if (major) {
+ archiveDelFiles = true;
+ } else {
+ archiveDelFiles = false;
+ break;
+ }
+ }
+ }
+ } catch (InterruptedException e) {
+ ForeignException ee = new ForeignException(
+ "Interrupted while waiting for snapshot to finish", e);
+ monitor.receive(ee);
+ Thread.currentThread().interrupt();
+ } catch (ForeignException e) {
+ monitor.receive(e);
+ } finally {
+ // clean up
+ compactingRegions.remove(tableName);
+ }
+ // return true if all the compactions are finished successfully and all files are selected
+ // in all region servers.
+ return allRegionsOnline && archiveDelFiles;
+ }
+
+ /**
+ * Selects all the del files.
+ * @param mobFamilyDir The directory where the compacted mob files are stored.
+ * @return The paths of selected del files.
+ * @throws IOException
+ */
+ private List<Path> selectDelFiles(Path mobFamilyDir) throws IOException {
+ // since all the del files are used in the mob compaction, they have to be merged
+ // together in one pass.
+ List<Path> allDelFiles = new ArrayList<Path>();
+ for (FileStatus file : fs.listStatus(mobFamilyDir)) {
+ if (!file.isFile()) {
+ continue;
+ }
+ // resolve hfile links to their original paths before checking for del files.
+ Path originalPath = file.getPath();
+ if (HFileLink.isHFileLink(file.getPath())) {
+ HFileLink link = HFileLink.buildFromHFileLinkPattern(conf, file.getPath());
+ FileStatus linkedFile = getLinkedFileStatus(link);
+ if (linkedFile == null) {
+ continue;
+ }
+ originalPath = link.getOriginPath();
+ }
+ if (StoreFileInfo.isDelFile(originalPath)) {
+ allDelFiles.add(file.getPath());
+ }
+ }
+ return allDelFiles;
+ }
+
+ private FileStatus getLinkedFileStatus(HFileLink link) throws IOException {
+ Path[] locations = link.getLocations();
+ for (Path location : locations) {
+ FileStatus file = getFileStatus(location);
+ if (file != null) {
+ return file;
+ }
+ }
+ return null;
+ }
+
+ private FileStatus getFileStatus(Path path) throws IOException {
+ try {
+ if (path != null) {
+ FileStatus file = fs.getFileStatus(path);
+ return file;
+ }
+ } catch (FileNotFoundException e) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("The file " + path + " can not be found", e);
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Compacts the del files in batches which avoids opening too many files.
+ * @param column The column family that the MOB compaction runs in.
+ * @param delFilePaths The paths of the del files.
+ * @param selectionTime The time when the compaction of del files starts
+ * @param cryptoContext The current encryption context.
+ * @param mobTableDir The directory of the MOB table.
+ * @param mobFamilyDir The directory of MOB column family.
+ * @return The paths of new del files after merging or the original files if no merging
+ * is necessary.
+ * @throws IOException
+ */
+ private List<Path> compactDelFiles(HColumnDescriptor column, List<Path> delFilePaths,
+ long selectionTime, Encryption.Context cryptoContext, Path mobTableDir, Path mobFamilyDir)
+ throws IOException {
+ if (delFilePaths.size() <= delFileMaxCount) {
+ return delFilePaths;
+ }
+ // when there are more del files than the number that is allowed, merge them first.
+ int offset = 0;
+ List<Path> paths = new ArrayList<Path>();
+ while (offset < delFilePaths.size()) {
+ // get the batch
+ int batch = compactionBatchSize;
+ if (delFilePaths.size() - offset < compactionBatchSize) {
+ batch = delFilePaths.size() - offset;
+ }
+ List<StoreFile> batchedDelFiles = new ArrayList<StoreFile>();
+ if (batch == 1) {
+ // only one file left, do not compact it, directly add it to the new files.
+ paths.add(delFilePaths.get(offset));
+ offset++;
+ continue;
+ }
+ for (int i = offset; i < batch + offset; i++) {
+ batchedDelFiles.add(new StoreFile(fs, delFilePaths.get(i), conf, compactionCacheConfig,
+ BloomType.NONE));
+ }
+ // compact the del files in a batch.
+ paths.add(compactDelFilesInBatch(column, batchedDelFiles, selectionTime, cryptoContext,
+ mobTableDir, mobFamilyDir));
+ // move to the next batch.
+ offset += batch;
+ }
+ return compactDelFiles(column, paths, selectionTime, cryptoContext, mobTableDir, mobFamilyDir);
+ }
+
+ /**
+ * Compacts the del file in a batch.
+ * @param column The column family that the MOB compaction runs in.
+ * @param delFiles The del files.
+ * @param selectionTime The time when the compaction of del files starts
+ * @param cryptoContext The current encryption context.
+ * @param mobTableDir The directory of the MOB table.
+ * @param mobFamilyDir The directory of MOB column family.
+ * @return The path of new del file after merging.
+ * @throws IOException
+ */
+ private Path compactDelFilesInBatch(HColumnDescriptor column, List<StoreFile> delFiles,
+ long selectionTime, Encryption.Context cryptoContext, Path mobTableDir, Path mobFamilyDir)
+ throws IOException {
+ StoreFileWriter writer = null;
+ Path filePath = null;
+ // create a scanner for the del files.
+ try (StoreScanner scanner =
+ createScanner(column, delFiles, ScanType.COMPACT_RETAIN_DELETES)) {
+ writer = MobUtils.createDelFileWriter(conf, fs, column,
+ MobUtils.formatDate(new Date(selectionTime)), tempPath, Long.MAX_VALUE,
+ column.getCompactionCompressionType(), HConstants.EMPTY_START_ROW, compactionCacheConfig,
+ cryptoContext);
+ filePath = writer.getPath();
+ List<Cell> cells = new ArrayList<Cell>();
+ boolean hasMore = false;
+ ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax)
+ .build();
+ do {
+ hasMore = scanner.next(cells, scannerContext);
+ for (Cell cell : cells) {
+ writer.append(cell);
+ }
+ cells.clear();
+ } while (hasMore);
+ } finally {
+ if (writer != null) {
+ try {
+ writer.close();
+ } catch (IOException e) {
+ LOG.error("Failed to close the writer of the file " + filePath, e);
+ }
+ }
+ }
+ // commit the new del file
+ Path path = MobUtils.commitFile(conf, fs, filePath, mobFamilyDir, compactionCacheConfig);
+ // archive the old del files
+ try {
+ MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), delFiles);
+ } catch (IOException e) {
+ LOG.error(
+ "Failed to archive the old del files " + delFiles + " in the column "
+ + column.getNameAsString() + " of the table " + tableName.getNameAsString(), e);
+ }
+ return path;
+ }
+
+ /**
+ * Creates a store scanner.
+ * @param column The column family that the MOB compaction runs in.
+ * @param filesToCompact The files to be compacted.
+ * @param scanType The scan type.
+ * @return The store scanner.
+ * @throws IOException
+ */
+ private StoreScanner createScanner(HColumnDescriptor column, List<StoreFile> filesToCompact,
+ ScanType scanType) throws IOException {
+ List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(filesToCompact, false, true, false,
+ false, HConstants.LATEST_TIMESTAMP);
+ Scan scan = new Scan();
+ scan.setMaxVersions(column.getMaxVersions());
+ long ttl = HStore.determineTTLFromFamily(column);
+ ScanInfo scanInfo = new ScanInfo(conf, column, ttl, 0, CellComparator.COMPARATOR);
+ StoreScanner scanner = new StoreScanner(scan, scanInfo, scanType, null, scanners, 0L,
+ HConstants.LATEST_TIMESTAMP);
+ return scanner;
+ }
+ }
+
+ @Override
+ public void stop(String why) {
+ if (this.stopped) {
+ return;
+ }
+ this.stopped = true;
+ for (Future<Void> compaction : compactions.values()) {
+ if (compaction != null) {
+ compaction.cancel(true);
+ }
+ }
+ try {
+ if (coordinator != null) {
+ coordinator.close();
+ }
+ } catch (IOException e) {
+ LOG.error("stop ProcedureCoordinator error", e);
+ }
+ if (closePoolOnStop) {
+ if (this.mobCompactionPool != null) {
+ mobCompactionPool.shutdown();
+ }
+ }
+ }
+
+ @Override
+ public boolean isStopped() {
+ return this.stopped;
+ }
+
+ @Override
+ public void initialize(MasterServices master, MetricsMaster metricsMaster)
+ throws KeeperException, IOException, UnsupportedOperationException {
+ // get the configuration for the coordinator
+ Configuration conf = master.getConfiguration();
+ long wakeFrequency = conf.getInt(MOB_COMPACTION_WAKE_MILLIS_KEY,
+ MOB_COMPACTION_WAKE_MILLIS_DEFAULT);
+ long timeoutMillis = conf.getLong(MOB_COMPACTION_TIMEOUT_MILLIS_KEY,
+ MOB_COMPACTION_TIMEOUT_MILLIS_DEFAULT);
+
+ // setup the procedure coordinator
+ String name = master.getServerName().toString();
+ ProcedureCoordinatorRpcs comms = new ZKProcedureCoordinatorRpcs(master.getZooKeeper(),
+ getProcedureSignature(), name);
+
+ this.coordinator = new ProcedureCoordinator(comms, mobCompactionPool, timeoutMillis,
+ wakeFrequency);
+ }
+
+ @Override
+ public boolean isProcedureDone(ProcedureDescription desc) throws IOException {
+ TableName tableName = TableName.valueOf(desc.getInstance());
+ Future<Void> compaction = compactions.get(tableName);
+ if (compaction != null) {
+ return compaction.isDone();
+ }
+ return true;
+ }
+
+ @Override
+ public String getProcedureSignature() {
+ return MOB_COMPACTION_PROCEDURE_SIGNATURE;
+ }
+
+ @Override
+ public void execProcedure(ProcedureDescription desc) throws IOException {
+ TableName tableName = TableName.valueOf(desc.getInstance());
+ List<NameStringPair> props = desc.getConfigurationList();
+ List<String> columnNames = new ArrayList<String>();
+ boolean allFiles = false;
+ for (NameStringPair prop : props) {
+ if (MOB_COMPACTION_PROCEDURE_COLUMN_KEY.equalsIgnoreCase(prop.getName())) {
+ columnNames.add(prop.getValue());
+ } else if (MOB_COMPACTION_PROCEDURE_ALL_FILES_KEY.equalsIgnoreCase(prop.getName())) {
+ allFiles = "true".equalsIgnoreCase(prop.getValue());
+ }
+ }
+ HTableDescriptor htd = master.getTableDescriptors().get(tableName);
+ List<HColumnDescriptor> compactedColumns = new ArrayList<HColumnDescriptor>();
+ if (!columnNames.isEmpty()) {
+ for (String columnName : columnNames) {
+ HColumnDescriptor column = htd.getFamily(Bytes.toBytes(columnName));
+ if (column == null) {
+ LOG.error("Column family " + columnName + " does not exist");
+ throw new DoNotRetryIOException("Column family " + columnName + " does not exist");
+ }
+ if (!column.isMobEnabled()) {
+ String msg = "Column family " + column.getNameAsString() + " is not a mob column family";
+ LOG.error(msg);
+ throw new DoNotRetryIOException(msg);
+ }
+ compactedColumns.add(column);
+ }
+ } else {
+ for (HColumnDescriptor column : htd.getColumnFamilies()) {
+ if (column.isMobEnabled()) {
+ compactedColumns.add(column);
+ }
+ }
+ }
+ requestMobCompaction(tableName, compactedColumns, allFiles);
+ }
+}
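
dispatchMobCompaction packs the per-column payload as two flag bytes followed by the column family name; the subprocedure side unpacks it symmetrically. A minimal decoding sketch, assuming data is the received procedure payload byte[] and with illustrative variable names:

  boolean allFiles = data[0] == 1;          // whether every mob file should be selected
  boolean allRegionsOnline = data[1] == 1;  // whether the master saw all regions online
  byte[] familyBytes = new byte[data.length - 2];
  System.arraycopy(data, 2, familyBytes, 0, familyBytes.length);
  String columnName = Bytes.toString(familyBytes);
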
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
index 711f31d..74a5cf8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
@@ -271,11 +271,13 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel
} while (hasMore);
} finally {
if (mobFileWriter != null) {
- mobFileWriter.appendMetadata(fd.maxSeqId, major, mobCells);
+ mobFileWriter.appendMetadata(fd.maxSeqId, major, mobCells, mobStore.getRegionInfo()
+ .getStartKey());
mobFileWriter.close();
}
if (delFileWriter != null) {
- delFileWriter.appendMetadata(fd.maxSeqId, major, deleteMarkersCount);
+ delFileWriter.appendMetadata(fd.maxSeqId, major, deleteMarkersCount, mobStore
+ .getRegionInfo().getStartKey());
delFileWriter.close();
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
index 93fa327..5aabc2d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
@@ -202,7 +202,8 @@ protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId,
} while (hasMore);
} finally {
status.setStatus("Flushing mob file " + store + ": appending metadata");
- mobFileWriter.appendMetadata(cacheFlushId, false, mobCount);
+ mobFileWriter.appendMetadata(cacheFlushId, false, mobCount, mobStore.getRegionInfo()
+ .getStartKey());
status.setStatus("Flushing mob file " + store + ": closing flushed file");
mobFileWriter.close();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
index 3c965cb..9d8dba5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
@@ -70,9 +70,15 @@ public void cleanExpiredMobFiles(String tableName, HColumnDescriptor family)
FileSystem fs = FileSystem.get(conf);
LOG.info("Cleaning the expired MOB files of " + family.getNameAsString() + " in " + tableName);
// disable the block cache.
- Configuration copyOfConf = new Configuration(conf);
- copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
- CacheConfig cacheConfig = new CacheConfig(copyOfConf);
+ boolean cacheDataOnRead = conf.getBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY,
+ CacheConfig.DEFAULT_CACHE_DATA_ON_READ);
+ Configuration configuration = conf;
+ if (cacheDataOnRead) {
+ Configuration copyOfConf = new Configuration(conf);
+ copyOfConf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, Boolean.FALSE);
+ configuration = copyOfConf;
+ }
+ CacheConfig cacheConfig = new CacheConfig(configuration);
MobUtils.cleanExpiredMobFiles(fs, conf, tn, family, cacheConfig,
EnvironmentEdgeManager.currentTime());
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java
index 82fc9cf..8be4a54 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobConstants.java
@@ -116,6 +116,7 @@
public static final String MOB_COMPACTION_THREADS_MAX =
"hbase.mob.compaction.threads.max";
public static final int DEFAULT_MOB_COMPACTION_THREADS_MAX = 1;
+ public static final String MOB_COMPACTION_PREFIX = "MOB_COMPACTION_";
private MobConstants() {
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index ecd2415..c8a91e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -20,6 +20,7 @@
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.net.InetSocketAddress;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
@@ -27,7 +28,6 @@
import java.util.Date;
import java.util.List;
import java.util.UUID;
-import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.SynchronousQueue;
@@ -43,7 +43,6 @@
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -61,10 +60,6 @@
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.mob.compactions.MobCompactor;
-import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactor;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -72,7 +67,6 @@
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.hbase.util.Threads;
/**
@@ -450,6 +444,7 @@ public static KeyValue createMobRefKeyValue(Cell cell, byte[] fileName, Tag tabl
/**
* Creates a writer for the mob file in temp directory.
+ * @param favoredNodes The favored nodes.
* @param conf The current configuration.
* @param fs The current file system.
* @param family The descriptor of the current column family.
@@ -463,15 +458,15 @@ public static KeyValue createMobRefKeyValue(Cell cell, byte[] fileName, Tag tabl
* @return The writer for the mob file.
* @throws IOException
*/
- public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
- HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
- Compression.Algorithm compression, String startKey, CacheConfig cacheConfig,
- Encryption.Context cryptoContext)
+ public static StoreFileWriter createWriter(InetSocketAddress[] favoredNodes,
+ Configuration conf, FileSystem fs, HColumnDescriptor family, String date, Path basePath,
+ long maxKeyCount, Compression.Algorithm compression, String startKey, CacheConfig cacheConfig,
+ Encryption.Context cryptoContext)
throws IOException {
MobFileName mobFileName = MobFileName.create(startKey, date, UUID.randomUUID().toString()
.replaceAll("-", ""));
- return createWriter(conf, fs, family, mobFileName, basePath, maxKeyCount, compression,
- cacheConfig, cryptoContext);
+ return createWriter(favoredNodes, conf, fs, family, mobFileName, basePath,
+ maxKeyCount, compression, cacheConfig, cryptoContext);
}
/**
@@ -506,6 +501,7 @@ public static StoreFileWriter createRefFileWriter(Configuration conf, FileSystem
/**
* Creates a writer for the mob file in temp directory.
+ * @param favoredNodes The favored nodes.
* @param conf The current configuration.
* @param fs The current file system.
* @param family The descriptor of the current column family.
@@ -519,15 +515,15 @@ public static StoreFileWriter createRefFileWriter(Configuration conf, FileSystem
* @return The writer for the mob file.
* @throws IOException
*/
- public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
- HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
- Compression.Algorithm compression, byte[] startKey, CacheConfig cacheConfig,
- Encryption.Context cryptoContext)
+ public static StoreFileWriter createWriter(InetSocketAddress[] favoredNodes,
+ Configuration conf, FileSystem fs, HColumnDescriptor family, String date, Path basePath,
+ long maxKeyCount, Compression.Algorithm compression, byte[] startKey, CacheConfig cacheConfig,
+ Encryption.Context cryptoContext)
throws IOException {
MobFileName mobFileName = MobFileName.create(startKey, date, UUID.randomUUID().toString()
.replaceAll("-", ""));
- return createWriter(conf, fs, family, mobFileName, basePath, maxKeyCount, compression,
- cacheConfig, cryptoContext);
+ return createWriter(favoredNodes, conf, fs, family, mobFileName, basePath,
+ maxKeyCount, compression, cacheConfig, cryptoContext);
}
/**
@@ -553,12 +549,13 @@ public static StoreFileWriter createDelFileWriter(Configuration conf, FileSystem
String suffix = UUID
.randomUUID().toString().replaceAll("-", "") + "_del";
MobFileName mobFileName = MobFileName.create(startKey, date, suffix);
- return createWriter(conf, fs, family, mobFileName, basePath, maxKeyCount, compression,
+ return createWriter(null, conf, fs, family, mobFileName, basePath, maxKeyCount, compression,
cacheConfig, cryptoContext);
}
/**
* Creates a writer for the mob file in temp directory.
+ * @param favoredNodes The favored nodes.
* @param conf The current configuration.
* @param fs The current file system.
* @param family The descriptor of the current column family.
@@ -571,10 +568,10 @@ public static StoreFileWriter createDelFileWriter(Configuration conf, FileSystem
* @return The writer for the mob file.
* @throws IOException
*/
- private static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
- HColumnDescriptor family, MobFileName mobFileName, Path basePath, long maxKeyCount,
- Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext)
- throws IOException {
+ private static StoreFileWriter createWriter(InetSocketAddress[] favoredNodes,
+ Configuration conf, FileSystem fs, HColumnDescriptor family, MobFileName mobFileName,
+ Path basePath, long maxKeyCount, Compression.Algorithm compression, CacheConfig cacheConfig,
+ Encryption.Context cryptoContext) throws IOException {
HFileContext hFileContext = new HFileContextBuilder().withCompression(compression)
.withIncludesMvcc(true).withIncludesTags(true)
.withCompressTags(family.isCompressTags())
@@ -583,11 +580,11 @@ private static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
.withHBaseCheckSum(true).withDataBlockEncoding(family.getDataBlockEncoding())
.withEncryptionContext(cryptoContext)
.withCreateTime(EnvironmentEdgeManager.currentTime()).build();
-
StoreFileWriter w = new StoreFileWriter.Builder(conf, cacheConfig, fs)
.withFilePath(new Path(basePath, mobFileName.getFileName()))
.withComparator(CellComparator.COMPARATOR).withBloomType(BloomType.NONE)
- .withMaxKeyCount(maxKeyCount).withFileContext(hFileContext).build();
+ .withMaxKeyCount(maxKeyCount).withFavoredNodes(favoredNodes).withFileContext(hFileContext)
+ .build();
return w;
}
@@ -695,64 +692,11 @@ public static TableName getTableLockName(TableName tn) {
}
/**
- * Performs the mob compaction.
- * @param conf the Configuration
- * @param fs the file system
- * @param tableName the table the compact
- * @param hcd the column descriptor
- * @param pool the thread pool
- * @param tableLockManager the tableLock manager
- * @param allFiles Whether add all mob files into the compaction.
- */
- public static void doMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
- HColumnDescriptor hcd, ExecutorService pool, TableLockManager tableLockManager,
- boolean allFiles) throws IOException {
- String className = conf.get(MobConstants.MOB_COMPACTOR_CLASS_KEY,
- PartitionedMobCompactor.class.getName());
- // instantiate the mob compactor.
- MobCompactor compactor = null;
- try {
- compactor = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] {
- Configuration.class, FileSystem.class, TableName.class, HColumnDescriptor.class,
- ExecutorService.class }, new Object[] { conf, fs, tableName, hcd, pool });
- } catch (Exception e) {
- throw new IOException("Unable to load configured mob file compactor '" + className + "'", e);
- }
- // compact only for mob-enabled column.
- // obtain a write table lock before performing compaction to avoid race condition
- // with major compaction in mob-enabled column.
- boolean tableLocked = false;
- TableLock lock = null;
- try {
- // the tableLockManager might be null in testing. In that case, it is lock-free.
- if (tableLockManager != null) {
- lock = tableLockManager.writeLock(MobUtils.getTableLockName(tableName),
- "Run MobCompactor");
- lock.acquire();
- }
- tableLocked = true;
- compactor.compact(allFiles);
- } catch (Exception e) {
- LOG.error("Failed to compact the mob files for the column " + hcd.getNameAsString()
- + " in the table " + tableName.getNameAsString(), e);
- } finally {
- if (lock != null && tableLocked) {
- try {
- lock.release();
- } catch (IOException e) {
- LOG.error(
- "Failed to release the write lock for the table " + tableName.getNameAsString(), e);
- }
- }
- }
- }
-
- /**
* Creates a thread pool.
* @param conf the Configuration
* @return A thread pool.
*/
- public static ExecutorService createMobCompactorThreadPool(Configuration conf) {
+ public static ThreadPoolExecutor createMobCompactorThreadPool(Configuration conf) {
int maxThreads = conf.getInt(MobConstants.MOB_COMPACTION_THREADS_MAX,
MobConstants.DEFAULT_MOB_COMPACTION_THREADS_MAX);
if (maxThreads == 0) {
@@ -771,7 +715,7 @@ public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
}
}
});
- ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
+ pool.allowCoreThreadTimeOut(true);
return pool;
}
@@ -817,10 +761,7 @@ public static boolean isReadEmptyValueOnMobCellMiss(Scan scan) {
*/
public static void archiveMobStoreFiles(Configuration conf, FileSystem fs,
HRegionInfo mobRegionInfo, Path mobFamilyDir, byte[] family) throws IOException {
- // disable the block cache.
- Configuration copyOfConf = HBaseConfiguration.create(conf);
- copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
- CacheConfig cacheConfig = new CacheConfig(copyOfConf);
+ CacheConfig cacheConfig = new CacheConfig(conf);
FileStatus[] fileStatus = FSUtils.listStatus(fs, mobFamilyDir);
List<StoreFile> storeFileList = new ArrayList<StoreFile>();
for (FileStatus file : fileStatus) {
@@ -828,4 +769,32 @@ public static void archiveMobStoreFiles(Configuration conf, FileSystem fs,
}
HFileArchiver.archiveStoreFiles(conf, fs, mobRegionInfo, mobFamilyDir, family, storeFileList);
}
+
+ /**
+ * Gets the referenced file status by the given hfile link.
+ * @param fs The file system.
+ * @param link The hfile link.
+ * @return The referenced file status.
+ * @throws IOException
+ */
+ public static FileStatus getReferencedFileStatus(FileSystem fs, HFileLink link)
+ throws IOException {
+ Path[] locations = link.getLocations();
+ for (Path location : locations) {
+ FileStatus file = null;
+ try {
+ if (location != null) {
+ file = fs.getFileStatus(location);
+ }
+ } catch (FileNotFoundException e) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("The file " + location + " can not be found", e);
+ }
+ }
+ if (file != null) {
+ return file;
+ }
+ }
+ return null;
+ }
}
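
The createWriter overloads now take favored nodes so mob files written during a region-server-side compaction can be placed near that server; callers without locality information pass null, as createDelFileWriter does above. A hedged usage sketch; how the favored nodes are obtained, and the in-scope variables, are assumptions:

  InetSocketAddress[] favoredNodes = null; // e.g. the hosting region server's favored nodes, if known
  StoreFileWriter mobFileWriter = MobUtils.createWriter(favoredNodes, conf, fs, family,
      MobUtils.formatDate(new Date()), basePath, maxKeyCount,
      family.getCompactionCompressionType(), startKey, cacheConfig, cryptoContext);
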
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactionSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactionSubprocedure.java
new file mode 100644
index 0000000..420b4d6
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactionSubprocedure.java
@@ -0,0 +1,345 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob.compactions;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
+import org.apache.hadoop.hbase.mob.MobFileName;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.mob.compactions.MobCompactionRequest.CompactionType;
+import org.apache.hadoop.hbase.mob.compactions.RegionServerMobCompactionProcedureManager.MobCompactionSubprocedurePool;
+import org.apache.hadoop.hbase.procedure.ProcedureMember;
+import org.apache.hadoop.hbase.procedure.Subprocedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMobCompactionStatusProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.ServiceException;
+
+/**
+ * The subprocedure implementation for mob compaction.
+ * The mob compaction is distributed to the region servers, and executed as a subprocedure
+ * on each region server.
+ */
+@InterfaceAudience.Private
+public class MobCompactionSubprocedure extends Subprocedure {
+ private static final Log LOG = LogFactory.getLog(MobCompactionSubprocedure.class);
+
+ private final Configuration conf;
+ private final String procName;
+ private final TableName tableName;
+ private final String columnName;
+ private final RegionServerServices rss;
+ private final List<Region> regions;
+ private final MobCompactionSubprocedurePool taskManager;
+ private boolean allFiles;
+ private boolean allRegionsOnline;
+ private Path mobFamilyDir;
+ private CacheConfig cacheConfig;
+ private RegionServerMobCompactionProcedureManager procedureManager;
+
+ public MobCompactionSubprocedure(RegionServerMobCompactionProcedureManager procedureManager,
+ ProcedureMember member, String procName, ForeignExceptionDispatcher errorListener,
+ long wakeFrequency, long timeout, RegionServerServices rss, List<Region> regions,
+ TableName tableName, String columnName, MobCompactionSubprocedurePool taskManager,
+ boolean allFiles, boolean allRegionsOnline) {
+ super(member, procName, errorListener, wakeFrequency, timeout);
+ this.procedureManager = procedureManager;
+ this.procName = procName;
+ this.tableName = tableName;
+ this.columnName = columnName;
+ this.rss = rss;
+ this.regions = regions;
+ this.taskManager = taskManager;
+ this.allFiles = allFiles;
+ this.allRegionsOnline = allRegionsOnline;
+ this.conf = rss.getConfiguration();
+ mobFamilyDir = MobUtils.getMobFamilyPath(conf, tableName, columnName);
+ Configuration copyOfConf = new Configuration(conf);
+ copyOfConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, false);
+ this.cacheConfig = new CacheConfig(copyOfConf);
+ }
+
+ /**
+ * Compacts mob files in the current region server.
+ */
+ @Override
+ public void acquireBarrier() throws ForeignException {
+ if (regions.isEmpty()) {
+ // No regions on this RS, we are basically done.
+ return;
+ }
+ synchronized (procedureManager) {
+ if (procedureManager.tablesToCompact.contains(tableName)) {
+ throw new ForeignException(getMemberName(), "The MOB compaction of the table " + tableName
+ + " is in progress in the server " + rss.getServerName());
+ }
+ procedureManager.tablesToCompact.add(tableName);
+ }
+ try {
+ execMobCompaction();
+ } finally {
+ synchronized (procedureManager) {
+ procedureManager.tablesToCompact.remove(tableName);
+ }
+ }
+ }
+
+ private void execMobCompaction() throws ForeignException {
+ List<FileStatus> files = null;
+ try {
+ files = Arrays.asList(rss.getFileSystem().listStatus(mobFamilyDir));
+ } catch (IOException e) {
+ throw new ForeignException(getMemberName(), e);
+ }
+ if (files.isEmpty()) {
+ return;
+ }
+ monitor.rethrowException();
+ Map<String, byte[]> prefixAndKeys = new HashMap<String, byte[]>();
+ // find the mapping from file prefix to startKey
+ for (FileStatus file : files) {
+ Path path = file.getPath();
+ if (HFileLink.isHFileLink(path)) {
+ HFileLink link;
+ try {
+ link = HFileLink.buildFromHFileLinkPattern(conf, path);
+ FileStatus linkedFile = MobUtils.getReferencedFileStatus(rss.getFileSystem(), link);
+ if (linkedFile == null) {
+ continue;
+ }
+ path = linkedFile.getPath();
+ } catch (IOException e) {
+ throw new ForeignException(getMemberName(), e);
+ }
+ }
+ String prefix = MobFileName.create(path.getName()).getStartKey();
+ if (prefixAndKeys.get(prefix) == null) {
+ StoreFile sf = null;
+ try {
+ sf = new StoreFile(rss.getFileSystem(), path, conf, cacheConfig, BloomType.NONE);
+ Reader reader = sf.createReader().getHFileReader();
+ Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
+ byte[] startKey = fileInfo.get(StoreFile.MOB_REGION_STARTKEY);
+ if (startKey == null) {
+ // use the row key of the first cell as the start key of the region the mob file
+ // comes from.
+ startKey = reader.getFirstRowKey();
+ if (startKey == null) {
+ startKey = HConstants.EMPTY_START_ROW;
+ }
+ }
+ prefixAndKeys.put(prefix, startKey);
+ } catch (IOException e) {
+ throw new ForeignException(getMemberName(), e);
+ } finally {
+ if (sf != null) {
+ try {
+ sf.closeReader(false);
+ } catch (IOException e) {
+ LOG.warn("Failed to close the store file " + path, e);
+ }
+ }
+ }
+ }
+ }
+
+ List<byte[]> sortedStartKeys = new ArrayList<byte[]>(regions.size());
+ for (Region region : regions) {
+ // submit one task per region to parallelize the work by region.
+ taskManager.submitTask(new RegionMobCompactionTask(region, files, prefixAndKeys));
+ sortedStartKeys.add(region.getRegionInfo().getStartKey());
+ monitor.rethrowException();
+ }
+
+ // wait for everything to complete.
+ boolean success = false;
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Mob compaction tasks submitted for " + regions.size() + " regions in table "
+ + tableName.getNameAsString());
+ }
+ try {
+ success = taskManager.waitForOutstandingTasks();
+ LOG.info("Mob compaction tasks of the table " + tableName.getNameAsString()
+ + " for region server " + rss.getServerName() + " are finished[" + success + "]");
+ } catch (InterruptedException e) {
+ throw new ForeignException(getMemberName(), e);
+ }
+ // add nodes to zookeeper if all the tasks are finished successfully
+ if (allRegionsOnline && success && !regions.isEmpty()) {
+ // compare the regions passed from the master with the regions currently online in this
+ // region server. If they are the same, all regions are online and every mob file owned by
+ // this region server can be compacted, so we mark the mob compaction in this server as major.
+ try {
+ List<byte[]> currentRegionStartKeys = getCompactionRegions();
+ if (currentRegionStartKeys.size() == sortedStartKeys.size()) {
+ Collections.sort(sortedStartKeys, Bytes.BYTES_COMPARATOR);
+ for (int i = 0; i < currentRegionStartKeys.size(); i++) {
+ if (Bytes.BYTES_COMPARATOR.compare(currentRegionStartKeys.get(i),
+ sortedStartKeys.get(i)) != 0) {
+ return;
+ }
+ }
+ updateCompactionAsMajor();
+ }
+ } catch (ServiceException e) {
+ throw new ForeignException(getMemberName(), e);
+ } catch (IOException e) {
+ throw new ForeignException(getMemberName(), e);
+ }
+ }
+ }
+
+ /**
+ * Gets the regions that run the mob compaction.
+ * @return The start keys of regions that run the mob compaction.
+ */
+ private List<byte[]> getCompactionRegions() throws ServiceException, IOException {
+ MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest request =
+ MasterMobCompactionStatusProtos.GetMobCompactionRegionsRequest
+ .newBuilder().setServerName(ProtobufUtil.toServerName(rss.getServerName()))
+ .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
+ ClientProtos.CoprocessorServiceCall call = ClientProtos.CoprocessorServiceCall
+ .newBuilder()
+ .setRow(ByteStringer.wrap(HConstants.EMPTY_BYTE_ARRAY))
+ .setServiceName(
+ MasterMobCompactionStatusProtos.MasterMobCompactionStatusService.getDescriptor()
+ .getFullName())
+ .setMethodName(
+ MasterMobCompactionStatusProtos.MasterMobCompactionStatusService.getDescriptor()
+ .getMethods().get(0).getName()).setRequest(request.toByteString()).build();
+ CoprocessorServiceResponse serviceResponse = ProtobufUtil.execService(null, rss
+ .getClusterConnection().getMaster(), call);
+ MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse response =
+ MasterMobCompactionStatusProtos.GetMobCompactionRegionsResponse
+ .parseFrom(serviceResponse.getValue().getValue());
+ List<byte[]> results = new ArrayList<byte[]>(response.getRegionStartKeyCount());
+ for (ByteString bs : response.getRegionStartKeyList()) {
+ results.add(bs.toByteArray());
+ }
+ return results;
+ }
+
+ /**
+ * Updates the mob compaction as major in the current server.
+ */
+ private void updateCompactionAsMajor() throws ServiceException, IOException {
+ MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest request =
+ MasterMobCompactionStatusProtos.UpdateMobCompactionAsMajorRequest
+ .newBuilder().setServerName(ProtobufUtil.toServerName(rss.getServerName()))
+ .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
+ ClientProtos.CoprocessorServiceCall call = ClientProtos.CoprocessorServiceCall
+ .newBuilder()
+ .setRow(ByteStringer.wrap(HConstants.EMPTY_BYTE_ARRAY))
+ .setServiceName(
+ MasterMobCompactionStatusProtos.MasterMobCompactionStatusService.getDescriptor()
+ .getFullName())
+ .setMethodName(
+ MasterMobCompactionStatusProtos.MasterMobCompactionStatusService.getDescriptor()
+ .getMethods().get(1).getName()).setRequest(request.toByteString()).build();
+ ProtobufUtil.execService(null, rss.getClusterConnection().getMaster(), call);
+ }
+
+ // Callable for mob compaction.
+ private class RegionMobCompactionTask implements Callable<Boolean> {
+ Region region;
+ List<FileStatus> files;
+ Map<String, byte[]> prefixAndKeys;
+
+ RegionMobCompactionTask(Region region, List<FileStatus> files, Map<String, byte[]> prefixAndKeys) {
+ this.region = region;
+ this.files = files;
+ this.prefixAndKeys = prefixAndKeys;
+ }
+
+ @Override
+ public Boolean call() throws Exception {
+ LOG.debug("Starting region operation mob compaction on " + region);
+ region.startRegionOperation();
+ try {
+ LOG.debug("Mob compaction of region " + region.toString() + " started...");
+ return compactRegion();
+ } finally {
+ LOG.debug("Closing region operation mob compaction on " + region);
+ region.closeRegionOperation();
+ }
+ }
+
+ /**
+ * Performs mob compaction in the current region.
+ * @return True if all the files are selected.
+ * @throws IOException
+ */
+ private boolean compactRegion()
+ throws IOException {
+ HColumnDescriptor column = region.getTableDesc().getFamily(Bytes.toBytes(columnName));
+ PartitionedMobCompactor compactor = new PartitionedMobCompactor(rss, region, tableName,
+ column, prefixAndKeys);
+ compactor.compact(files, allFiles);
+ return compactor.getPartitionedMobCompactionRequest().getCompactionType() ==
+ CompactionType.ALL_FILES;
+ }
+ }
+
+ @Override
+ public byte[] insideBarrier() throws ForeignException {
+ // No-Op
+ return new byte[0];
+ }
+
+ @Override
+ public void cleanup(Exception e) {
+ LOG.info(
+ "Aborting all mob compaction subprocedure task threads for '" + procName
+ + "' due to error", e);
+ try {
+ taskManager.cancelTasks();
+ } catch (InterruptedException e1) {
+ Thread.currentThread().interrupt();
+ }
+ }
+}
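
acquireBarrier above serializes MOB compactions per table on a region server by registering the table in the shared tablesToCompact set and always removing it in a finally block. A compact sketch of that guard pattern in plain Java, with illustrative names:

import java.util.HashSet;
import java.util.Set;

// Hypothetical per-table guard mirroring the tablesToCompact handling above.
class CompactionGuard {
  private final Set<String> tablesInProgress = new HashSet<>();

  void runExclusively(String table, Runnable compaction) {
    synchronized (this) {
      if (!tablesInProgress.add(table)) {
        throw new IllegalStateException("MOB compaction of " + table + " already in progress");
      }
    }
    try {
      compaction.run();
    } finally {
      synchronized (this) {
        tablesInProgress.remove(table); // always release the table, even on failure
      }
    }
  }
}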
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
index 77de0cd..db319f2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java
@@ -21,7 +21,6 @@
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
-import java.util.concurrent.ExecutorService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
@@ -31,6 +30,8 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.util.FSUtils;
/**
@@ -39,6 +40,8 @@
@InterfaceAudience.Private
public abstract class MobCompactor {
+ protected RegionServerServices rss;
+ protected Region region;
protected FileSystem fs;
protected Configuration conf;
protected TableName tableName;
@@ -46,15 +49,15 @@
protected Path mobTableDir;
protected Path mobFamilyDir;
- protected ExecutorService pool;
- public MobCompactor(Configuration conf, FileSystem fs, TableName tableName,
- HColumnDescriptor column, ExecutorService pool) {
- this.conf = conf;
- this.fs = fs;
+ public MobCompactor(RegionServerServices rss, Region region, TableName tableName,
+ HColumnDescriptor column) {
+ this.rss = rss;
+ this.region = region;
+ this.conf = rss.getConfiguration();
+ this.fs = rss.getFileSystem();
this.tableName = tableName;
this.column = column;
- this.pool = pool;
mobTableDir = FSUtils.getTableDir(MobUtils.getMobHome(conf), tableName);
mobFamilyDir = MobUtils.getMobFamilyPath(conf, tableName, column.getNameAsString());
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java
index 227f1e4..1209776 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java
@@ -25,7 +25,6 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* An implementation of {@link MobCompactionRequest} that is used in
@@ -40,10 +39,10 @@
protected Collection<CompactionPartition> compactionPartitions;
public PartitionedMobCompactionRequest(Collection<CompactionPartition> compactionPartitions,
- Collection<FileStatus> delFiles) {
- this.selectionTime = EnvironmentEdgeManager.currentTime();
+ Collection<FileStatus> delFiles, long selectionTime) {
this.compactionPartitions = compactionPartitions;
this.delFiles = delFiles;
+ this.selectionTime = selectionTime;
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
index b4d4bab..c99b0bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
@@ -18,8 +18,9 @@
*/
package org.apache.hadoop.hbase.mob.compactions;
-import java.io.FileNotFoundException;
import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -27,29 +28,24 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.HFileLink;
@@ -64,6 +60,8 @@
import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartitionId;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.ScanInfo;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
@@ -74,6 +72,7 @@
import org.apache.hadoop.hbase.regionserver.StoreScanner;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
/**
@@ -94,10 +93,15 @@
private final CacheConfig compactionCacheConfig;
private final Tag tableNameTag;
private Encryption.Context cryptoContext = Encryption.Context.NONE;
+ private PartitionedMobCompactionRequest request;
+ private Map<String, byte[]> prefixAndKeys;
+ private HRegionInfo regionInfo;
- public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName,
- HColumnDescriptor column, ExecutorService pool) throws IOException {
- super(conf, fs, tableName, column, pool);
+ public PartitionedMobCompactor(RegionServerServices rss, Region region, TableName tableName,
+ HColumnDescriptor column, Map<String, byte[]> prefixAndKeys) throws IOException {
+ super(rss, region, tableName, column);
+ this.prefixAndKeys = prefixAndKeys;
+ regionInfo = this.region.getRegionInfo();
mergeableSize = conf.getLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD,
MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD);
delFileMaxCount = conf.getInt(MobConstants.MOB_DELFILE_MAX_COUNT,
@@ -110,11 +114,17 @@ public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tabl
tableName.getNamespaceAsString(), tableName.getQualifierAsString())));
compactionKVMax = this.conf.getInt(HConstants.COMPACTION_KV_MAX,
HConstants.COMPACTION_KV_MAX_DEFAULT);
- Configuration copyOfConf = new Configuration(conf);
- copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
- compactionCacheConfig = new CacheConfig(copyOfConf);
+ boolean cacheDataOnRead = conf.getBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY,
+ CacheConfig.DEFAULT_CACHE_DATA_ON_READ);
+ Configuration configuration = conf;
+ if (cacheDataOnRead) {
+ Configuration copyOfConf = new Configuration(conf);
+ copyOfConf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, Boolean.FALSE);
+ configuration = copyOfConf;
+ }
+ compactionCacheConfig = new CacheConfig(configuration);
tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, tableName.getName());
- cryptoContext = EncryptionUtil.createEncryptionContext(copyOfConf, column);
+ cryptoContext = EncryptionUtil.createEncryptionContext(configuration, column);
}
@Override
@@ -125,12 +135,20 @@ public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tabl
}
LOG.info("is allFiles: " + allFiles);
// find the files to compact.
- PartitionedMobCompactionRequest request = select(files, allFiles);
+ request = select(files, allFiles);
// compact the files.
return performCompaction(request);
}
/**
+ * Gets the compaction request.
+ * @return The compaction request.
+ */
+ public PartitionedMobCompactionRequest getPartitionedMobCompactionRequest() {
+ return this.request;
+ }
+
+ /**
* Selects the compacted mob/del files.
* Iterates the candidates to find out all the del files and small mob files.
* @param candidates All the candidates.
@@ -140,10 +158,15 @@ public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tabl
*/
protected PartitionedMobCompactionRequest select(List<FileStatus> candidates,
boolean allFiles) throws IOException {
- Collection<FileStatus> allDelFiles = new ArrayList<>();
- Map<CompactionPartitionId, CompactionPartition> filesToCompact = new HashMap<>();
+ long selectionTime = EnvironmentEdgeManager.currentTime();
+ Date expiredDate = new Date(selectionTime - column.getTimeToLive() * 1000L);
+ expiredDate = new Date(expiredDate.getYear(), expiredDate.getMonth(), expiredDate.getDate());
+ Collection<FileStatus> allDelFiles = new ArrayList<FileStatus>();
+ Map<CompactionPartitionId, CompactionPartition> filesToCompact =
+ new HashMap<CompactionPartitionId, CompactionPartition>();
int selectedFileCount = 0;
int irrelevantFileCount = 0;
+ int expiredFileCount = 0;
for (FileStatus file : candidates) {
if (!file.isFile()) {
irrelevantFileCount++;
@@ -151,9 +174,21 @@ protected PartitionedMobCompactionRequest select(List candidates,
}
// group the del files and small files.
FileStatus linkedFile = file;
+ HFileLink link = null;
+ String fn = file.getPath().getName();
if (HFileLink.isHFileLink(file.getPath())) {
- HFileLink link = HFileLink.buildFromHFileLinkPattern(conf, file.getPath());
- linkedFile = getLinkedFileStatus(link);
+ link = HFileLink.buildFromHFileLinkPattern(conf, file.getPath());
+ fn = link.getOriginPath().getName();
+ }
+ if (!StoreFileInfo.isDelFile(fn)) {
+ MobFileName fileName = MobFileName.create(fn);
+ if (!isOwnedByRegion(fileName.getStartKey())) {
+ irrelevantFileCount++;
+ continue;
+ }
+ }
+ if (link != null) {
+ linkedFile = MobUtils.getReferencedFileStatus(fs, link);
if (linkedFile == null) {
// If the linked file cannot be found, regard it as an irrelevantFileCount file
irrelevantFileCount++;
@@ -162,36 +197,53 @@ protected PartitionedMobCompactionRequest select(List candidates,
}
if (StoreFileInfo.isDelFile(linkedFile.getPath())) {
allDelFiles.add(file);
- } else if (allFiles || linkedFile.getLen() < mergeableSize) {
- // add all files if allFiles is true,
- // otherwise add the small files to the merge pool
+ } else {
MobFileName fileName = MobFileName.create(linkedFile.getPath().getName());
- CompactionPartitionId id = new CompactionPartitionId(fileName.getStartKey(),
- fileName.getDate());
- CompactionPartition compactionPartition = filesToCompact.get(id);
- if (compactionPartition == null) {
- compactionPartition = new CompactionPartition(id);
- compactionPartition.addFile(file);
- filesToCompact.put(id, compactionPartition);
- } else {
- compactionPartition.addFile(file);
+ if (isExpiredMobFile(fileName, expiredDate)) {
+ expiredFileCount++;
+ continue;
+ }
+ if (allFiles || linkedFile.getLen() < mergeableSize) {
+ // add all files if allFiles is true,
+ // otherwise add the small files to the merge pool
+ CompactionPartitionId id = new CompactionPartitionId(fileName.getStartKey(),
+ fileName.getDate());
+ CompactionPartition compactionPartition = filesToCompact.get(id);
+ if (compactionPartition == null) {
+ compactionPartition = new CompactionPartition(id);
+ compactionPartition.addFile(file);
+ filesToCompact.put(id, compactionPartition);
+ } else {
+ compactionPartition.addFile(file);
+ }
+ selectedFileCount++;
}
- selectedFileCount++;
}
}
PartitionedMobCompactionRequest request = new PartitionedMobCompactionRequest(
- filesToCompact.values(), allDelFiles);
- if (candidates.size() == (allDelFiles.size() + selectedFileCount + irrelevantFileCount)) {
+ filesToCompact.values(), allDelFiles, selectionTime);
+ if (candidates.size() == (allDelFiles.size() + selectedFileCount + irrelevantFileCount +
+ expiredFileCount)) {
// all the files are selected
request.setCompactionType(CompactionType.ALL_FILES);
}
LOG.info("The compaction type is " + request.getCompactionType() + ", the request has "
- + allDelFiles.size() + " del files, " + selectedFileCount + " selected files, and "
- + irrelevantFileCount + " irrelevant files");
+ + allDelFiles.size() + " del files, " + selectedFileCount + " selected files, "
+ + irrelevantFileCount + " irrelevant files, and " + expiredFileCount + " expired files");
return request;
}
/**
+ * Checks whether the start key mapped from the given mob file name prefix is owned by the
+ * current region.
+ * @param prefix The prefix of the mob file name.
+ * @return True if the mapped start key is owned by the current region.
+ */
+ private boolean isOwnedByRegion(String prefix) {
+ byte[] startKey = prefixAndKeys.get(prefix);
+ return regionInfo.containsRow(startKey);
+ }
+
+ /**
* Performs the compaction on the selected files.
*
* - Compacts the del files.
@@ -205,37 +257,24 @@ protected PartitionedMobCompactionRequest select(List candidates,
protected List performCompaction(PartitionedMobCompactionRequest request)
throws IOException {
// merge the del files
- List<Path> delFilePaths = new ArrayList<>();
+ List<Path> delFilePaths = new ArrayList<Path>();
for (FileStatus delFile : request.delFiles) {
delFilePaths.add(delFile.getPath());
}
- List<Path> newDelPaths = compactDelFiles(request, delFilePaths);
- List<StoreFile> newDelFiles = new ArrayList<>();
+ List<StoreFile> delFiles = new ArrayList<StoreFile>();
List<Path> paths = null;
try {
- for (Path newDelPath : newDelPaths) {
- StoreFile sf = new StoreFile(fs, newDelPath, conf, compactionCacheConfig, BloomType.NONE);
- // pre-create reader of a del file to avoid race condition when opening the reader in each
- // partition.
+ for (Path delFilePath : delFilePaths) {
+ StoreFile sf = new StoreFile(fs, delFilePath, conf, compactionCacheConfig, BloomType.NONE);
sf.createReader();
- newDelFiles.add(sf);
+ delFiles.add(sf);
}
- LOG.info("After merging, there are " + newDelFiles.size() + " del files");
+ LOG.info("After merging, there are " + delFiles.size() + " del files");
// compact the mob files by partitions.
- paths = compactMobFiles(request, newDelFiles);
+ paths = compactMobFiles(request, delFiles);
LOG.info("After compaction, there are " + paths.size() + " mob files");
} finally {
- closeStoreFileReaders(newDelFiles);
- }
- // archive the del files if all the mob files are selected.
- if (request.type == CompactionType.ALL_FILES && !newDelPaths.isEmpty()) {
- LOG.info("After a mob compaction with all files selected, archiving the del files "
- + newDelPaths);
- try {
- MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), newDelFiles);
- } catch (IOException e) {
- LOG.error("Failed to archive the del files " + newDelPaths, e);
- }
+ closeStoreFileReaders(delFiles);
}
return paths;
}
@@ -251,44 +290,36 @@ protected PartitionedMobCompactionRequest select(List candidates,
final List<StoreFile> delFiles) throws IOException {
Collection<CompactionPartition> partitions = request.compactionPartitions;
if (partitions == null || partitions.isEmpty()) {
- LOG.info("No partitions of mob files");
+ LOG.info("No partitions of mob files in the table " + tableName);
return Collections.emptyList();
}
- List<Path> paths = new ArrayList<>();
- final Connection c = ConnectionFactory.createConnection(conf);
+ List<Path> paths = new ArrayList<Path>();
+ Connection c = rss.getConnection();
final Table table = c.getTable(tableName);
try {
- Map<CompactionPartitionId, Future<List<Path>>> results = new HashMap<>();
- // compact the mob files by partitions in parallel.
+ List<CompactionPartitionId> failedPartitions = new ArrayList<CompactionPartitionId>();
for (final CompactionPartition partition : partitions) {
- results.put(partition.getPartitionId(), pool.submit(new Callable<List<Path>>() {
- @Override
- public List<Path> call() throws Exception {
- LOG.info("Compacting mob files for partition " + partition.getPartitionId());
- return compactMobFilePartition(request, partition, delFiles, c, table);
- }
- }));
- }
- // compact the partitions in parallel.
- List<CompactionPartitionId> failedPartitions = new ArrayList<>();
- for (Entry<CompactionPartitionId, Future<List<Path>>> result : results.entrySet()) {
+ LOG.info("Compacting mob files for partition " + partition.getPartitionId()
+ + " in the table " + tableName);
try {
- paths.addAll(result.getValue().get());
+ paths.addAll(compactMobFilePartition(request, partition, delFiles, c, table));
} catch (Exception e) {
// just log the error
- LOG.error("Failed to compact the partition " + result.getKey(), e);
- failedPartitions.add(result.getKey());
+ LOG.error("Failed to compact the partition " + partition.getPartitionId()
+ + " in the table " + tableName, e);
+ failedPartitions.add(partition.getPartitionId());
}
}
if (!failedPartitions.isEmpty()) {
// if any partition fails in the compaction, directly throw an exception.
- throw new IOException("Failed to compact the partitions " + failedPartitions);
+ throw new IOException("Failed to compact the partitions " + failedPartitions
+ + " in the table " + tableName);
}
} finally {
try {
table.close();
} catch (IOException e) {
- LOG.error("Failed to close the Table", e);
+ LOG.error("Failed to close the table " + tableName, e);
}
}
return paths;
@@ -308,7 +339,7 @@ protected PartitionedMobCompactionRequest select(List candidates,
List<StoreFile> delFiles,
Connection connection,
Table table) throws IOException {
- List<Path> newFiles = new ArrayList<>();
+ List<Path> newFiles = new ArrayList<Path>();
List<FileStatus> files = partition.listFiles();
int offset = 0;
Path bulkloadPathOfPartition = new Path(bulkloadPath, partition.getPartitionId().toString());
@@ -328,7 +359,7 @@ protected PartitionedMobCompactionRequest select(List candidates,
// clean the bulkload directory to avoid loading old files.
fs.delete(bulkloadPathOfPartition, true);
// add the selected mob files and del files into filesToCompact
- List<StoreFile> filesToCompact = new ArrayList<>();
+ List<StoreFile> filesToCompact = new ArrayList<StoreFile>();
for (int i = offset; i < batch + offset; i++) {
StoreFile sf = new StoreFile(fs, files.get(i).getPath(), conf, compactionCacheConfig,
BloomType.NONE);
@@ -369,9 +400,9 @@ private void closeStoreFileReaders(List storeFiles) {
* @param filesToCompact The files to be compacted.
* @param batch The number of mob files to be compacted in a batch.
* @param bulkloadPathOfPartition The directory where the bulkload column of the current
- * partition is saved.
+ * partition is saved.
* @param bulkloadColumnPath The directory where the bulkload files of current partition
- * are saved.
+ * are saved.
* @param newFiles The paths of new mob files after compactions.
* @throws IOException if IO failure is encountered
*/
@@ -395,16 +426,21 @@ private void compactMobFilesInBatch(PartitionedMobCompactionRequest request,
Path refFilePath = null;
long mobCells = 0;
try {
- writer = MobUtils.createWriter(conf, fs, column, partition.getPartitionId().getDate(),
- tempPath, Long.MAX_VALUE, column.getCompactionCompression(), partition.getPartitionId()
- .getStartKey(), compactionCacheConfig, cryptoContext);
+ InetSocketAddress[] favoredNodes = null;
+ if (rss != null && regionInfo.getEncodedName() != null) {
+ favoredNodes = rss.getFavoredNodesForRegion(regionInfo.getEncodedName());
+ }
+ writer = MobUtils.createWriter(favoredNodes, conf, fs,
+ column, partition.getPartitionId().getDate(), tempPath, Long.MAX_VALUE,
+ column.getCompactionCompression(), partition.getPartitionId().getStartKey(),
+ compactionCacheConfig, cryptoContext);
filePath = writer.getPath();
byte[] fileName = Bytes.toBytes(filePath.getName());
// create a temp file and open a writer for it in the bulkloadPath
refFileWriter = MobUtils.createRefFileWriter(conf, fs, column, bulkloadColumnPath, fileInfo
.getSecond().longValue(), compactionCacheConfig, cryptoContext);
refFilePath = refFileWriter.getPath();
- List<Cell> cells = new ArrayList<>();
+ List<Cell> cells = new ArrayList<Cell>();
boolean hasMore;
ScannerContext scannerContext =
ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
@@ -424,7 +460,7 @@ private void compactMobFilesInBatch(PartitionedMobCompactionRequest request,
// close the scanner.
scanner.close();
// append metadata to the mob file, and close the mob file writer.
- closeMobFileWriter(writer, fileInfo.getFirst(), mobCells);
+ closeMobFileWriter(writer, fileInfo.getFirst(), mobCells, regionInfo.getStartKey());
// append metadata and bulkload info to the ref mob file, and close the writer.
closeRefFileWriter(refFileWriter, fileInfo.getFirst(), request.selectionTime);
}
@@ -452,98 +488,6 @@ private void compactMobFilesInBatch(PartitionedMobCompactionRequest request,
}
/**
- * Compacts the del files in batches which avoids opening too many files.
- * @param request The compaction request.
- * @param delFilePaths Del file paths to compact
- * @return The paths of new del files after merging or the original files if no merging
- * is necessary.
- * @throws IOException if IO failure is encountered
- */
- protected List<Path> compactDelFiles(PartitionedMobCompactionRequest request,
- List<Path> delFilePaths) throws IOException {
- if (delFilePaths.size() <= delFileMaxCount) {
- return delFilePaths;
- }
- // when there are more del files than the number that is allowed, merge it firstly.
- int offset = 0;
- List<Path> paths = new ArrayList<>();
- while (offset < delFilePaths.size()) {
- // get the batch
- int batch = compactionBatchSize;
- if (delFilePaths.size() - offset < compactionBatchSize) {
- batch = delFilePaths.size() - offset;
- }
- List<StoreFile> batchedDelFiles = new ArrayList<>();
- if (batch == 1) {
- // only one file left, do not compact it, directly add it to the new files.
- paths.add(delFilePaths.get(offset));
- offset++;
- continue;
- }
- for (int i = offset; i < batch + offset; i++) {
- batchedDelFiles.add(new StoreFile(fs, delFilePaths.get(i), conf, compactionCacheConfig,
- BloomType.NONE));
- }
- // compact the del files in a batch.
- paths.add(compactDelFilesInBatch(request, batchedDelFiles));
- // move to the next batch.
- offset += batch;
- }
- return compactDelFiles(request, paths);
- }
-
- /**
- * Compacts the del file in a batch.
- * @param request The compaction request.
- * @param delFiles The del files.
- * @return The path of new del file after merging.
- * @throws IOException if IO failure is encountered
- */
- private Path compactDelFilesInBatch(PartitionedMobCompactionRequest request,
- List<StoreFile> delFiles) throws IOException {
- // create a scanner for the del files.
- StoreScanner scanner = createScanner(delFiles, ScanType.COMPACT_RETAIN_DELETES);
- StoreFileWriter writer = null;
- Path filePath = null;
- try {
- writer = MobUtils.createDelFileWriter(conf, fs, column,
- MobUtils.formatDate(new Date(request.selectionTime)), tempPath, Long.MAX_VALUE,
- column.getCompactionCompression(), HConstants.EMPTY_START_ROW, compactionCacheConfig,
- cryptoContext);
- filePath = writer.getPath();
- List<Cell> cells = new ArrayList<>();
- boolean hasMore;
- ScannerContext scannerContext =
- ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
- do {
- hasMore = scanner.next(cells, scannerContext);
- for (Cell cell : cells) {
- writer.append(cell);
- }
- cells.clear();
- } while (hasMore);
- } finally {
- scanner.close();
- if (writer != null) {
- try {
- writer.close();
- } catch (IOException e) {
- LOG.error("Failed to close the writer of the file " + filePath, e);
- }
- }
- }
- // commit the new del file
- Path path = MobUtils.commitFile(conf, fs, filePath, mobFamilyDir, compactionCacheConfig);
- // archive the old del files
- try {
- MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), delFiles);
- } catch (IOException e) {
- LOG.error("Failed to archive the old del files " + delFiles, e);
- }
- return path;
- }
-
- /**
* Creates a store scanner.
* @param filesToCompact The files to be compacted.
* @param scanType The scan type.
@@ -597,12 +541,13 @@ private void bulkloadRefFile(Connection connection, Table table, Path bulkloadDi
* @param writer The mob file writer.
* @param maxSeqId Maximum sequence id.
* @param mobCellsCount The number of mob cells.
+ * @param startKey The start key of the region where the mob file comes from.
* @throws IOException if IO failure is encountered
*/
- private void closeMobFileWriter(StoreFileWriter writer, long maxSeqId, long mobCellsCount)
- throws IOException {
+ private void closeMobFileWriter(StoreFileWriter writer, long maxSeqId, long mobCellsCount,
+ byte[] startKey) throws IOException {
if (writer != null) {
- writer.appendMetadata(maxSeqId, false, mobCellsCount);
+ writer.appendMetadata(maxSeqId, false, mobCellsCount, startKey);
try {
writer.close();
} catch (IOException e) {
@@ -666,26 +611,21 @@ private void deletePath(Path path) {
}
}
- private FileStatus getLinkedFileStatus(HFileLink link) throws IOException {
- Path[] locations = link.getLocations();
- for (Path location : locations) {
- FileStatus file = getFileStatus(location);
- if (file != null) {
- return file;
- }
- }
- return null;
- }
-
- private FileStatus getFileStatus(Path path) throws IOException {
+ /**
+ * Checks whether the given mob file is expired.
+ * @param mobFileName The name of a mob file.
+ * @param expiredDate The expiration date.
+ * @return True if the mob file is expired.
+ */
+ private boolean isExpiredMobFile(MobFileName mobFileName, Date expiredDate) {
try {
- if (path != null) {
- FileStatus file = fs.getFileStatus(path);
- return file;
+ Date fileDate = MobUtils.parseDate(mobFileName.getDate());
+ if (fileDate.getTime() < expiredDate.getTime()) {
+ return true;
}
- } catch (FileNotFoundException e) {
- LOG.warn("The file " + path + " can not be found", e);
+ } catch (ParseException e) {
+ // the date in the file name cannot be parsed, do not treat the file as expired
}
- return null;
+ return false;
}
}
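
isExpiredMobFile compares the date encoded in a mob file name against a cut-off derived from the column family TTL, and an unparseable date is treated as not expired. A standalone sketch of that check follows, assuming a yyyyMMdd date string; the exact date format used by MobUtils.parseDate is not shown in this patch.

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;

// Hypothetical expiration check: a file is expired when the day encoded in its
// name is strictly earlier than the day the TTL window starts.
final class MobExpiry {
  // SimpleDateFormat is not thread-safe; a single-threaded sketch keeps it simple.
  private static final SimpleDateFormat FORMAT = new SimpleDateFormat("yyyyMMdd");

  static boolean isExpired(String fileDatePart, long nowMillis, long ttlSeconds) {
    try {
      // truncate the cut-off to day granularity, as the selection code does
      Date cutOff = FORMAT.parse(FORMAT.format(new Date(nowMillis - ttlSeconds * 1000L)));
      Date fileDate = FORMAT.parse(fileDatePart);
      return fileDate.getTime() < cutOff.getTime();
    } catch (ParseException e) {
      return false; // an unparseable date is treated as not expired, as in the patch
    }
  }
}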
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/RegionServerMobCompactionProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/RegionServerMobCompactionProcedureManager.java
new file mode 100644
index 0000000..2bd0429
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/RegionServerMobCompactionProcedureManager.java
@@ -0,0 +1,319 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob.compactions;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DaemonThreadFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.master.MobCompactionManager;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.procedure.ProcedureMember;
+import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;
+import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
+import org.apache.hadoop.hbase.procedure.Subprocedure;
+import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
+import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * This manager class handles mob compaction for tables on a {@link HRegionServer}.
+ */
+@InterfaceAudience.Private
+public class RegionServerMobCompactionProcedureManager extends RegionServerProcedureManager {
+ private static final Log LOG = LogFactory.getLog(RegionServerMobCompactionProcedureManager.class);
+
+ private RegionServerServices rss;
+ private ProcedureMemberRpcs memberRpcs;
+ private ProcedureMember member;
+ private static final String CONCURRENT_MOB_COMPACTION_TASKS_KEY =
+ "hbase.mob.compaction.procedure.concurrent.tasks";
+ private static final int DEFAULT_CONCURRENT_MOB_COMPACTION_TASKS = 10;
+ public static final String MOB_COMPACTION_PROCEDURE_POOL_THREADS_KEY =
+ "hbase.mob.compaction.procedure.pool.threads";
+ public static final int MOB_COMPACTION_PROCEDURE_POOL_THREADS_DEFAULT = 5;
+
+ public static final String MOB_COMPACTION_TIMEOUT_MILLIS_KEY =
+ "hbase.mob.compaction.procedure.timeout";
+ public static final long MOB_COMPACTION_TIMEOUT_MILLIS_DEFAULT = 60000;
+
+ public static final String MOB_COMPACTION_WAKE_MILLIS_KEY =
+ "hbase.mob.compaction.procedure.wakefrequency";
+ private static final long MOB_COMPACTION_WAKE_MILLIS_DEFAULT = 500;
+ Set<TableName> tablesToCompact = new HashSet<TableName>();
+
+ @Override
+ public void initialize(RegionServerServices rss) throws KeeperException {
+ this.rss = rss;
+ ZooKeeperWatcher zkw = rss.getZooKeeper();
+ this.memberRpcs = new ZKProcedureMemberRpcs(zkw,
+ MobCompactionManager.MOB_COMPACTION_PROCEDURE_SIGNATURE);
+
+ Configuration conf = rss.getConfiguration();
+ long keepAlive = conf.getLong(MOB_COMPACTION_TIMEOUT_MILLIS_KEY,
+ MOB_COMPACTION_TIMEOUT_MILLIS_DEFAULT);
+ int opThreads = conf.getInt(MOB_COMPACTION_PROCEDURE_POOL_THREADS_KEY,
+ MOB_COMPACTION_PROCEDURE_POOL_THREADS_DEFAULT);
+
+ // create the actual mob compaction procedure member
+ ThreadPoolExecutor pool = ProcedureMember.defaultPool(rss.getServerName().toString(),
+ opThreads, keepAlive);
+ this.member = new ProcedureMember(memberRpcs, pool, new MobCompactionSubprocedureBuilder());
+ }
+
+ /**
+ * Starts accepting mob compaction requests.
+ */
+ @Override
+ public void start() {
+ LOG.debug("Start region server mob compaction procedure manager "
+ + rss.getServerName().toString());
+ this.memberRpcs.start(rss.getServerName().toString(), member);
+ }
+
+ /**
+ * Closes this and all running tasks
+ * @param force forcefully stop all running tasks
+ * @throws IOException
+ */
+ @Override
+ public void stop(boolean force) throws IOException {
+ String mode = force ? "abruptly" : "gracefully";
+ LOG.info("Stopping region server mob compaction procedure manager " + mode + ".");
+
+ try {
+ this.member.close();
+ } finally {
+ this.memberRpcs.close();
+ }
+ }
+
+ @Override
+ public String getProcedureSignature() {
+ return MobCompactionManager.MOB_COMPACTION_PROCEDURE_SIGNATURE;
+ }
+
+ /**
+ * Creates a specified subprocedure to compact mob files.
+ *
+ * @param procName the procedure name.
+ * @param data the arguments passed in master side.
+ * @return Subprocedure to submit to the ProcedureMember.
+ */
+ public Subprocedure buildSubprocedure(String procName, byte[] data) {
+ TableName tableName = TableName.valueOf(procName.substring(MobConstants.MOB_COMPACTION_PREFIX
+ .length()));
+ // don't run the subprocedure if the parent is stop(ping)
+ if (rss.isStopping() || rss.isStopped()) {
+ throw new IllegalStateException("Can't start mob compaction subprocedure on RS: "
+ + rss.getServerName() + ", because stopping/stopped!");
+ }
+ // check to see if this server is hosting any regions for the table
+ List<Region> involvedRegions;
+ try {
+ involvedRegions = rss.getOnlineRegions(tableName);
+ } catch (IOException e1) {
+ throw new IllegalStateException(
+ "Failed to get the online regions for mob compaction in table "
+ + tableName.getNameAsString() + ".", e1);
+ }
+ // parse the allFiles and allRegionsOnline flags, and the column name
+ boolean allFiles = (data[0] != (byte) 0);
+ boolean allRegionsOnline = (data[1] != (byte) 0);
+ String columnName = Bytes.toString(data, 2, data.length - 2);
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Launching subprocedure to compact mob files for " + procName);
+ }
+ ForeignExceptionDispatcher exnDispatcher = new ForeignExceptionDispatcher(procName);
+ Configuration conf = rss.getConfiguration();
+ long timeoutMillis = conf.getLong(MOB_COMPACTION_TIMEOUT_MILLIS_KEY,
+ MOB_COMPACTION_TIMEOUT_MILLIS_DEFAULT);
+ long wakeMillis = conf.getLong(MOB_COMPACTION_WAKE_MILLIS_KEY,
+ MOB_COMPACTION_WAKE_MILLIS_DEFAULT);
+
+ MobCompactionSubprocedurePool taskManager = new MobCompactionSubprocedurePool(rss
+ .getServerName().toString(), conf);
+
+ return new MobCompactionSubprocedure(this, member, procName, exnDispatcher, wakeMillis,
+ timeoutMillis, rss, involvedRegions, tableName, columnName, taskManager, allFiles,
+ allRegionsOnline);
+ }
+
+ public class MobCompactionSubprocedureBuilder implements SubprocedureFactory {
+ @Override
+ public Subprocedure buildSubprocedure(String name, byte[] data) {
+ return RegionServerMobCompactionProcedureManager.this.buildSubprocedure(name, data);
+ }
+ }
+
+ /**
+ * We use the MobCompactionSubprocedurePool, a class-specific thread pool, instead of
+ * {@link org.apache.hadoop.hbase.executor.ExecutorService}.
+ *
+ * It uses a {@link java.util.concurrent.ExecutorCompletionService} which provides queuing of
+ * all tasks and lets us efficiently cancel pending tasks upon the earliest operation failure.
+ */
+ static class MobCompactionSubprocedurePool {
+ private final ExecutorCompletionService<Boolean> taskPool;
+ private final ThreadPoolExecutor executor;
+ private volatile boolean stopped;
+ private final List<Future<Boolean>> futures = new ArrayList<Future<Boolean>>();
+ private final String name;
+
+ MobCompactionSubprocedurePool(String name, Configuration conf) {
+ // configure the executor service
+ long keepAlive = conf.getLong(MOB_COMPACTION_TIMEOUT_MILLIS_KEY,
+ MOB_COMPACTION_TIMEOUT_MILLIS_DEFAULT);
+ int threads = conf.getInt(CONCURRENT_MOB_COMPACTION_TASKS_KEY,
+ DEFAULT_CONCURRENT_MOB_COMPACTION_TASKS);
+ this.name = name;
+ executor = new ThreadPoolExecutor(threads, threads, keepAlive, TimeUnit.MILLISECONDS,
+ new LinkedBlockingQueue<Runnable>(), new DaemonThreadFactory("rs(" + name
+ + ")-mobCompaction-proc-pool"));
+ executor.allowCoreThreadTimeOut(true);
+ taskPool = new ExecutorCompletionService<Boolean>(executor);
+ }
+
+ boolean hasTasks() {
+ return futures.size() != 0;
+ }
+
+ /**
+ * Submit a task to the pool.
+ *
+ * NOTE: all tasks must be submitted before you can safely {@link #waitForOutstandingTasks()}.
+ */
+ void submitTask(final Callable<Boolean> task) {
+ Future<Boolean> f = this.taskPool.submit(task);
+ futures.add(f);
+ }
+
+ /**
+ * Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}.
+ * This *must* be called after all tasks are submitted via submitTask.
+ *
+ * @return true on success, false otherwise
+ * @throws InterruptedException
+ */
+ boolean waitForOutstandingTasks() throws ForeignException, InterruptedException {
+ LOG.debug("Waiting for local region mob compaction to finish.");
+
+ int sz = futures.size();
+ try {
+ boolean success = true;
+ ExecutionException existingException = null;
+ // Using the completion service to process the futures.
+ for (int i = 0; i < sz; i++) {
+ Future<Boolean> f = taskPool.take();
+ try {
+ success = f.get() && success;
+ } catch (ExecutionException e) {
+ success = false;
+ existingException = e; // only throw the last exception
+ LOG.error("Got Exception in MobCompactionSubprocedurePool" + (i + 1), e);
+ }
+ if (!futures.remove(f)) {
+ LOG.warn("unexpected future" + f);
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Completed " + (i + 1) + "/" + sz + " local region mob compaction tasks.");
+ }
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Completed " + sz + " local region mob compaction tasks.");
+ }
+ if (existingException != null) {
+ throw existingException;
+ }
+ return success;
+ } catch (InterruptedException e) {
+ LOG.warn("Got InterruptedException in MobCompactionSubprocedurePool", e);
+ if (!stopped) {
+ Thread.currentThread().interrupt();
+ throw new ForeignException("MobCompactionSubprocedurePool", e);
+ }
+ // we are stopped so we can just exit.
+ } catch (ExecutionException e) {
+ Throwable cause = e.getCause();
+ if (cause instanceof ForeignException) {
+ throw (ForeignException) e.getCause();
+ }
+ throw new ForeignException(name, e.getCause());
+ } finally {
+ cancelTasks();
+ }
+ return false;
+ }
+
+ /**
+ * This attempts to cancel all pending and in-progress tasks. It does not interrupt the
+ * running tasks themselves.
+ *
+ * @throws InterruptedException
+ */
+ void cancelTasks() throws InterruptedException {
+ Collection<Future<Boolean>> tasks = futures;
+ LOG.debug("cancelling " + tasks.size() + " mob compaction tasks " + name);
+ for (Future<Boolean> f : tasks) {
+ f.cancel(false);
+ }
+
+ // evict remaining tasks and futures from taskPool.
+ futures.clear();
+ while (taskPool.poll() != null) {
+ }
+ stop();
+ }
+
+ /**
+ * Gracefully shutdown the thread pool.
+ */
+ void stop() {
+ if (this.stopped)
+ return;
+
+ this.stopped = true;
+ this.executor.shutdown();
+ }
+ }
+}
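
MobCompactionSubprocedurePool wraps its ThreadPoolExecutor in an ExecutorCompletionService so results are drained in completion order and the whole batch is reported as failed and cancelled once any task throws. A minimal standalone sketch of that pattern with illustrative names:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Hypothetical task pool: submit Boolean tasks, wait for all of them, and report
// overall success only when every task returned true and none threw.
final class BooleanTaskPool {
  private final ExecutorService executor = Executors.newFixedThreadPool(4);
  private final ExecutorCompletionService<Boolean> completion =
      new ExecutorCompletionService<>(executor);
  private final List<Future<Boolean>> futures = new ArrayList<>();

  void submit(Callable<Boolean> task) {
    futures.add(completion.submit(task));
  }

  boolean waitForAll() throws InterruptedException {
    boolean success = true;
    try {
      for (int i = 0; i < futures.size(); i++) {
        Future<Boolean> f = completion.take(); // next finished task, in completion order
        try {
          success = f.get() && success;
        } catch (ExecutionException e) {
          success = false; // remember the failure but keep draining the rest
        }
      }
    } finally {
      for (Future<Boolean> f : futures) {
        f.cancel(false); // cancel anything still pending
      }
      executor.shutdown();
    }
    return success;
  }
}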
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
index 7997b49..cb85bd6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
@@ -132,9 +132,9 @@ private void internalFlushCache(final MemStoreSnapshot snapshot)
}
// generate the files into a temp directory.
String tempPathString = context.getConfiguration().get(SweepJob.WORKING_FILES_DIR_KEY);
- StoreFileWriter mobFileWriter = MobUtils.createWriter(conf, fs, hcd, partitionId.getDate(),
- new Path(tempPathString), snapshot.getCellsCount(), hcd.getCompactionCompression(),
- partitionId.getStartKey(), cacheConfig, cryptoContext);
+ StoreFileWriter mobFileWriter = MobUtils.createWriter(null, conf, fs, hcd,
+ partitionId.getDate(), new Path(tempPathString), snapshot.getCellsCount(),
+ hcd.getCompactionCompression(), partitionId.getStartKey(), cacheConfig, cryptoContext);
String relativePath = mobFileWriter.getPath().getName();
LOG.info("Create files under a temp directory " + mobFileWriter.getPath().toString());
@@ -148,7 +148,8 @@ private void internalFlushCache(final MemStoreSnapshot snapshot)
scanner.close();
// Write out the log sequence number that corresponds to this output
// hfile. The hfile is current up to and including logCacheFlushId.
- mobFileWriter.appendMetadata(Long.MAX_VALUE, false, snapshot.getCellsCount());
+ mobFileWriter.appendMetadata(Long.MAX_VALUE, false, snapshot.getCellsCount(),
+ HConstants.EMPTY_START_ROW);
mobFileWriter.close();
MobUtils.commitFile(conf, fs, mobFileWriter.getPath(), mobFamilyDir, cacheConfig);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
index 0f4ea64..591d257 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
@@ -22,6 +22,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.mob.compactions.RegionServerMobCompactionProcedureManager;
import org.apache.hadoop.hbase.procedure.flush.RegionServerFlushTableProcedureManager;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
@@ -73,6 +74,8 @@ public void loadProcedures(Configuration conf) {
procedures.add(new RegionServerSnapshotManager());
// load the default flush region procedure manager
procedures.add(new RegionServerFlushTableProcedureManager());
+ // load the mob compaction procedure manager
+ procedures.add(new RegionServerMobCompactionProcedureManager());
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index 8634e37..5314105 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -19,6 +19,7 @@
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
@@ -258,11 +259,16 @@ public StoreFileWriter createWriterInTmp(MobFileName mobFileName, Path basePath,
.withHBaseCheckSum(true).withDataBlockEncoding(getFamily().getDataBlockEncoding())
.withEncryptionContext(cryptoContext)
.withCreateTime(EnvironmentEdgeManager.currentTime()).build();
-
+ InetSocketAddress[] favoredNodes = null;
+ if (region.getRegionServerServices() != null) {
+ favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion(
+ region.getRegionInfo().getEncodedName());
+ }
StoreFileWriter w = new StoreFileWriter.Builder(conf, writerCacheConf, region.getFilesystem())
- .withFilePath(new Path(basePath, mobFileName.getFileName()))
- .withComparator(CellComparator.COMPARATOR).withBloomType(BloomType.NONE)
- .withMaxKeyCount(maxKeyCount).withFileContext(hFileContext).build();
+ .withFilePath(new Path(basePath, mobFileName.getFileName()))
+ .withComparator(CellComparator.COMPARATOR).withBloomType(BloomType.NONE)
+ .withMaxKeyCount(maxKeyCount).withFavoredNodes(favoredNodes).withFileContext(hFileContext)
+ .build();
return w;
}
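
The mob file writer is now built with the region's favored nodes, so newly written mob files are placed on the DataNodes preferred for that region, falling back to default placement when the services are unavailable. A small sketch of that lookup pattern; the NodeLookup interface below is a stand-in, not the RegionServerServices API:

import java.net.InetSocketAddress;

// Hypothetical helper mirroring the pattern above: only ask for favored nodes when
// the lookup is available, otherwise return null so the writer uses default placement.
final class FavoredNodesHelper {
  interface NodeLookup {
    InetSocketAddress[] favoredNodesFor(String encodedRegionName);
  }

  static InetSocketAddress[] resolve(NodeLookup services, String encodedRegionName) {
    if (services == null || encodedRegionName == null) {
      return null;
    }
    return services.favoredNodesFor(encodedRegionName);
  }
}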
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6522fde..62b66c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1137,7 +1137,7 @@ public HRegionInfo getRegionInfo() {
* @return Instance of {@link RegionServerServices} used by this HRegion.
* Can be null.
*/
- RegionServerServices getRegionServerServices() {
+ public RegionServerServices getRegionServerServices() {
return this.rsServices;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 46f0cb8..911a0ca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -97,6 +97,9 @@
/** Key for the number of mob cells in metadata*/
public static final byte[] MOB_CELLS_COUNT = Bytes.toBytes("MOB_CELLS_COUNT");
+ /** Key in metadata for the start key of the region where the mob file comes from*/
+ public static final byte[] MOB_REGION_STARTKEY = Bytes.toBytes("MOB_REGION_STARTKEY");
+
private final StoreFileInfo fileInfo;
private final FileSystem fs;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
index 4a42b7f..d9cc144 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
@@ -152,13 +152,15 @@ public void appendMetadata(final long maxSequenceId, final boolean majorCompacti
* @param maxSequenceId Maximum sequence id.
* @param majorCompaction True if this file is product of a major compaction
* @param mobCellsCount The number of mob cells.
+ * @param startKey The start key of the region where the mob file comes from.
* @throws IOException problem writing to FS
*/
public void appendMetadata(final long maxSequenceId, final boolean majorCompaction,
- final long mobCellsCount) throws IOException {
+ final long mobCellsCount, byte[] startKey) throws IOException {
writer.appendFileInfo(StoreFile.MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId));
writer.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction));
writer.appendFileInfo(StoreFile.MOB_CELLS_COUNT, Bytes.toBytes(mobCellsCount));
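+ // Also record the start key of the region this mob file was written from.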
+ writer.appendFileInfo(StoreFile.MOB_REGION_STARTKEY, startKey);
appendTrackedTimestampsToMetadata();
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 60b62e4..b949d99 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -331,4 +331,9 @@ public SnapshotManager getSnapshotManager() {
public MasterProcedureManagerHost getMasterProcedureManagerHost() {
return null;
}
+
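+ // Like the other no-op overrides in this mock, the new mob compaction manager hook simply returns null.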
+ @Override
+ public MobCompactionManager getMobCompactionManager() {
+ return null;
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
index c0ad2dd..f3064e3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
@@ -26,9 +26,12 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
+import java.util.Comparator;
import java.util.List;
import java.util.Random;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.SynchronousQueue;
@@ -68,6 +71,7 @@
import org.apache.hadoop.hbase.io.crypto.aes.AES;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.regionserver.BloomType;
@@ -79,6 +83,8 @@
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.SortedList;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -154,10 +160,6 @@ public void setUp(String tableNameAsString) throws IOException {
@Test(timeout = 300000)
public void testMinorCompaction() throws Exception {
resetConf();
- int mergeSize = 5000;
- // change the mob compaction merge size
- conf.setLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD, mergeSize);
-
// create a table with namespace
NamespaceDescriptor namespaceDescriptor = NamespaceDescriptor.create("ns").build();
String tableNameAsString = "ns:testMinorCompaction";
@@ -175,7 +177,10 @@ public void testMinorCompaction() throws Exception {
assertEquals("Before deleting: mob file count", regionNum * count,
countFiles(tableName, true, family1));
- int largeFilesCount = countLargeFiles(mergeSize, tableName, family1);
+ Pair<Integer, Long> largerFiles = countLargeFiles(tableName, family1);
+ int largeFilesCount = largerFiles.getFirst();
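+ // Derive the merge threshold from the sizes of the mob files on disk instead of a fixed value.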
+ // change the mob compaction merge size
+ setLongConf(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD, largerFiles.getSecond());
createDelFile(table, tableName, Bytes.toBytes(family1), Bytes.toBytes(qf1));
assertEquals("Before compaction: mob rows count", regionNum * (rowNumPerRegion - delRowNum),
@@ -192,8 +197,7 @@ public void testMinorCompaction() throws Exception {
countFiles(tableName, false, family2));
// do the mob file compaction
- MobCompactor compactor = new PartitionedMobCompactor(conf, fs, tableName, hcd1, pool);
- compactor.compact();
+ compact(tableName, hcd1);
assertEquals("After compaction: mob rows count", regionNum * (rowNumPerRegion - delRowNum),
countMobRows(table));
@@ -201,7 +205,8 @@ public void testMinorCompaction() throws Exception {
* (cellNumPerRow * rowNumPerRegion - delCellNum), countMobCells(table));
// After the compaction, the files smaller than the mob compaction merge size
// is merge to one file
- assertEquals("After compaction: family1 mob file count", largeFilesCount + regionNum,
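+ // If every mob file was already larger than the threshold, nothing gets merged and the file count is unchanged.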
+ assertEquals("After compaction: family1 mob file count",
+ (largeFilesCount == regionNum * count) ? largeFilesCount : largeFilesCount + regionNum,
countFiles(tableName, true, family1));
assertEquals("After compaction: family2 mob file count", regionNum * count,
countFiles(tableName, true, family2));
@@ -211,15 +216,15 @@ public void testMinorCompaction() throws Exception {
countFiles(tableName, false, family2));
}
- @Test(timeout = 300000)
- public void testCompactionWithHFileLink() throws IOException, InterruptedException {
+ @Test
+ public void testCompactionWithHFileLink() throws IOException, InterruptedException,
+ ExecutionException {
resetConf();
- String tableNameAsString = "testCompactionWithHFileLink";
- setUp(tableNameAsString);
+ setUp("testCompactionWithHFileLink");
int count = 4;
// generate mob files
loadData(admin, bufMut, tableName, count, rowNumPerFile);
- int rowNumPerRegion = count * rowNumPerFile;
+ int rowNumPerRegion = count*rowNumPerFile;
long tid = System.currentTimeMillis();
byte[] snapshotName1 = Bytes.toBytes("snaptb-" + tid);
@@ -242,8 +247,7 @@ public void testCompactionWithHFileLink() throws IOException, InterruptedExcepti
countFiles(tableName, false, family2));
// do the mob compaction
- MobCompactor compactor = new PartitionedMobCompactor(conf, fs, tableName, hcd1, pool);
- compactor.compact();
+ compact(tableName, hcd1);
assertEquals("After first compaction: mob rows count", regionNum
* (rowNumPerRegion - delRowNum), countMobRows(table));
@@ -281,7 +285,7 @@ public void testCompactionWithHFileLink() throws IOException, InterruptedExcepti
countHFileLinks(family1));
assertEquals("After restoring snapshot: family2 hfilelink count", 0, countHFileLinks(family2));
- compactor.compact();
+ compact(tableName, hcd1);
assertEquals("After second compaction: mob rows count", regionNum * rowNumPerRegion,
countMobRows(table));
@@ -305,7 +309,7 @@ public void testMajorCompactionFromAdmin() throws Exception {
resetConf();
int mergeSize = 5000;
// change the mob compaction merge size
- conf.setLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD, mergeSize);
+ setLongConf(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD, mergeSize);
String tableNameAsString = "testMajorCompactionFromAdmin";
SecureRandom rng = new SecureRandom();
byte[] keyBytes = new byte[AES.KEY_LENGTH];
@@ -511,9 +515,7 @@ private int countMobCells(final Table table) throws IOException {
ResultScanner results = table.getScanner(scan);
int count = 0;
for (Result res : results) {
- for (Cell cell : res.listCells()) {
- count++;
- }
+ count += res.size();
}
results.close();
return count;
@@ -592,19 +594,34 @@ private int countHFileLinks(String familyName) throws IOException {
* @param familyName the family name
* @return the number of files large than the size
*/
- private int countLargeFiles(int size, TableName tableName, String familyName) throws IOException {
+ private Pair<Integer, Long> countLargeFiles(TableName tableName, String familyName)
+ throws IOException {
Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableName, familyName);
- int count = 0;
+ SortedList<Long> fileLength = new SortedList<Long>(new Comparator<Long>() {
+ @Override
+ public int compare(Long o1, Long o2) {
+ return o1.compareTo(o2);
+ }
+ });
if (fs.exists(mobDirPath)) {
FileStatus[] files = fs.listStatus(mobDirPath);
for (FileStatus file : files) {
// ignore the del files in the mob path
- if ((!StoreFileInfo.isDelFile(file.getPath())) && (file.getLen() > size)) {
- count++;
+ if (!StoreFileInfo.isDelFile(file.getPath())) {
+ fileLength.add(file.getLen());
}
}
}
- return count;
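+ // Split the sorted file lengths around the median, skipping duplicate lengths, and use the length at the split point as the merge threshold.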
+ int index = fileLength.size() / 2;
+ if (index > 0) {
+ while (index < fileLength.size()
+ && fileLength.get(index - 1).longValue() == fileLength.get(index).longValue()) {
+ index++;
+ }
+ }
+ return index == fileLength.size() ? new Pair<Integer, Long>(fileLength.size(),
+ fileLength.get(0))
+ : new Pair<Integer, Long>(fileLength.size() - index, fileLength.get(index));
}
/**
@@ -670,6 +687,7 @@ private void createDelFile(Table table, TableName tableName, byte[] family, byte
region.compact(true);
}
}
+
/**
* Creates the dummy data with a specific size.
* @param size the size of value
@@ -710,7 +728,7 @@ public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
}
}
});
- ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
+ pool.allowCoreThreadTimeOut(true);
return pool;
}
@@ -722,8 +740,8 @@ private void assertRefFileNameEqual(String familyName) throws IOException {
ResultScanner results = table.getScanner(scan);
Path mobFamilyPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(),
tableName, familyName);
- List<Path> actualFilePaths = new ArrayList<>();
- List<Path> expectFilePaths = new ArrayList<>();
+ List<Path> actualFilePaths = new ArrayList<Path>();
+ List<Path> expectFilePaths = new ArrayList<Path>();
for (Result res : results) {
for (Cell cell : res.listCells()) {
byte[] referenceValue = CellUtil.cloneValue(cell);
@@ -753,9 +771,28 @@ private void assertRefFileNameEqual(String familyName) throws IOException {
* Resets the configuration.
*/
private void resetConf() {
- conf.setLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD,
+ setLongConf(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD,
MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD);
- conf.setInt(MobConstants.MOB_COMPACTION_BATCH_SIZE,
+ setIntConf(MobConstants.MOB_COMPACTION_BATCH_SIZE,
MobConstants.DEFAULT_MOB_COMPACTION_BATCH_SIZE);
}
+
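+ // Apply the setting to both the master and the region server, since the mob compaction is now driven by the master.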
+ private void setLongConf(String key, long value) {
+ TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setLong(key, value);
+ TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration().setLong(key, value);
+ }
+
+ private void setIntConf(String key, int value) {
+ TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setInt(key, value);
+ TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration().setInt(key, value);
+ }
+
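+ // Triggers a mob compaction through the master and waits for it to complete.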
+ private void compact(TableName tableName, HColumnDescriptor hcd) throws IOException,
+ InterruptedException, ExecutionException {
+ HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
+ List<HColumnDescriptor> columns = new ArrayList<HColumnDescriptor>(1);
+ columns.add(hcd);
+ Future future = master.requestMobCompaction(tableName, columns, false);
+ future.get();
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
deleted file mode 100644
index 0e0bdbe..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
+++ /dev/null
@@ -1,399 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.mob.compactions;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.List;
-import java.util.Random;
-import java.util.UUID;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.RejectedExecutionHandler;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.regionserver.*;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
-import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.mob.MobConstants;
-import org.apache.hadoop.hbase.mob.MobFileName;
-import org.apache.hadoop.hbase.mob.MobUtils;
-import org.apache.hadoop.hbase.mob.compactions.MobCompactionRequest.CompactionType;
-import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Threads;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(LargeTests.class)
-public class TestPartitionedMobCompactor {
- private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private final static String family = "family";
- private final static String qf = "qf";
- private HColumnDescriptor hcd = new HColumnDescriptor(family);
- private Configuration conf = TEST_UTIL.getConfiguration();
- private CacheConfig cacheConf = new CacheConfig(conf);
- private FileSystem fs;
- private List<FileStatus> mobFiles = new ArrayList<>();
- private List<FileStatus> delFiles = new ArrayList<>();
- private List<FileStatus> allFiles = new ArrayList<>();
- private Path basePath;
- private String mobSuffix;
- private String delSuffix;
- private static ExecutorService pool;
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
- TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
- TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
- TEST_UTIL.startMiniCluster(1);
- pool = createThreadPool();
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- pool.shutdown();
- TEST_UTIL.shutdownMiniCluster();
- }
-
- private void init(String tableName) throws Exception {
- fs = FileSystem.get(conf);
- Path testDir = FSUtils.getRootDir(conf);
- Path mobTestDir = new Path(testDir, MobConstants.MOB_DIR_NAME);
- basePath = new Path(new Path(mobTestDir, tableName), family);
- mobSuffix = UUID.randomUUID().toString().replaceAll("-", "");
- delSuffix = UUID.randomUUID().toString().replaceAll("-", "") + "_del";
- }
-
- @Test
- public void testCompactionSelectWithAllFiles() throws Exception {
- String tableName = "testCompactionSelectWithAllFiles";
- testCompactionAtMergeSize(tableName, MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD,
- CompactionType.ALL_FILES, false);
- }
-
- @Test
- public void testCompactionSelectWithPartFiles() throws Exception {
- String tableName = "testCompactionSelectWithPartFiles";
- testCompactionAtMergeSize(tableName, 4000, CompactionType.PART_FILES, false);
- }
-
- @Test
- public void testCompactionSelectWithForceAllFiles() throws Exception {
- String tableName = "testCompactionSelectWithForceAllFiles";
- testCompactionAtMergeSize(tableName, Long.MAX_VALUE, CompactionType.ALL_FILES, true);
- }
-
- private void testCompactionAtMergeSize(final String tableName,
- final long mergeSize, final CompactionType type, final boolean isForceAllFiles)
- throws Exception {
- resetConf();
- init(tableName);
- int count = 10;
- // create 10 mob files.
- createStoreFiles(basePath, family, qf, count, Type.Put);
- // create 10 del files
- createStoreFiles(basePath, family, qf, count, Type.Delete);
- listFiles();
- List<String> expectedStartKeys = new ArrayList<>();
- for(FileStatus file : mobFiles) {
- if(file.getLen() < mergeSize) {
- String fileName = file.getPath().getName();
- String startKey = fileName.substring(0, 32);
- expectedStartKeys.add(startKey);
- }
- }
- // set the mob compaction mergeable threshold
- conf.setLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD, mergeSize);
- testSelectFiles(tableName, type, isForceAllFiles, expectedStartKeys);
- }
-
- @Test
- public void testCompactDelFilesWithDefaultBatchSize() throws Exception {
- String tableName = "testCompactDelFilesWithDefaultBatchSize";
- testCompactDelFilesAtBatchSize(tableName, MobConstants.DEFAULT_MOB_COMPACTION_BATCH_SIZE,
- MobConstants.DEFAULT_MOB_DELFILE_MAX_COUNT);
- }
-
- @Test
- public void testCompactDelFilesWithSmallBatchSize() throws Exception {
- String tableName = "testCompactDelFilesWithSmallBatchSize";
- testCompactDelFilesAtBatchSize(tableName, 4, MobConstants.DEFAULT_MOB_DELFILE_MAX_COUNT);
- }
-
- @Test
- public void testCompactDelFilesChangeMaxDelFileCount() throws Exception {
- String tableName = "testCompactDelFilesWithSmallBatchSize";
- testCompactDelFilesAtBatchSize(tableName, 4, 2);
- }
-
- private void testCompactDelFilesAtBatchSize(String tableName, int batchSize,
- int delfileMaxCount) throws Exception {
- resetConf();
- init(tableName);
- // create 20 mob files.
- createStoreFiles(basePath, family, qf, 20, Type.Put);
- // create 13 del files
- createStoreFiles(basePath, family, qf, 13, Type.Delete);
- listFiles();
-
- // set the max del file count
- conf.setInt(MobConstants.MOB_DELFILE_MAX_COUNT, delfileMaxCount);
- // set the mob compaction batch size
- conf.setInt(MobConstants.MOB_COMPACTION_BATCH_SIZE, batchSize);
- testCompactDelFiles(tableName, 1, 13, false);
- }
-
- /**
- * Tests the selectFiles
- * @param tableName the table name
- * @param type the expected compaction type
- * @param isForceAllFiles whether all the mob files are selected
- * @param expected the expected start keys
- */
- private void testSelectFiles(String tableName, final CompactionType type,
- final boolean isForceAllFiles, final List<String> expected) throws IOException {
- PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs,
- TableName.valueOf(tableName), hcd, pool) {
- @Override
- public List<Path> compact(List<FileStatus> files, boolean isForceAllFiles)
- throws IOException {
- if (files == null || files.isEmpty()) {
- return null;
- }
- PartitionedMobCompactionRequest request = select(files, isForceAllFiles);
- // assert the compaction type
- Assert.assertEquals(type, request.type);
- // assert get the right partitions
- compareCompactedPartitions(expected, request.compactionPartitions);
- // assert get the right del files
- compareDelFiles(request.delFiles);
- return null;
- }
- };
- compactor.compact(allFiles, isForceAllFiles);
- }
-
- /**
- * Tests the compacteDelFile
- * @param tableName the table name
- * @param expectedFileCount the expected file count
- * @param expectedCellCount the expected cell count
- * @param isForceAllFiles whether all the mob files are selected
- */
- private void testCompactDelFiles(String tableName, final int expectedFileCount,
- final int expectedCellCount, boolean isForceAllFiles) throws IOException {
- PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs,
- TableName.valueOf(tableName), hcd, pool) {
- @Override
- protected List<Path> performCompaction(PartitionedMobCompactionRequest request)
- throws IOException {
- List<Path> delFilePaths = new ArrayList<Path>();
- for (FileStatus delFile : request.delFiles) {
- delFilePaths.add(delFile.getPath());
- }
- List<Path> newDelPaths = compactDelFiles(request, delFilePaths);
- // assert the del files are merged.
- Assert.assertEquals(expectedFileCount, newDelPaths.size());
- Assert.assertEquals(expectedCellCount, countDelCellsInDelFiles(newDelPaths));
- return null;
- }
- };
- compactor.compact(allFiles, isForceAllFiles);
- }
-
- /**
- * Lists the files in the path
- */
- private void listFiles() throws IOException {
- for (FileStatus file : fs.listStatus(basePath)) {
- allFiles.add(file);
- if (file.getPath().getName().endsWith("_del")) {
- delFiles.add(file);
- } else {
- mobFiles.add(file);
- }
- }
- }
-
- /**
- * Compares the compacted partitions.
- * @param partitions the collection of CompactedPartitions
- */
- private void compareCompactedPartitions(List<String> expected,
- Collection<CompactionPartition> partitions) {
- List<String> actualKeys = new ArrayList<>();
- for (CompactionPartition partition : partitions) {
- actualKeys.add(partition.getPartitionId().getStartKey());
- }
- Collections.sort(expected);
- Collections.sort(actualKeys);
- Assert.assertEquals(expected.size(), actualKeys.size());
- for (int i = 0; i < expected.size(); i++) {
- Assert.assertEquals(expected.get(i), actualKeys.get(i));
- }
- }
-
- /**
- * Compares the del files.
- * @param allDelFiles all the del files
- */
- private void compareDelFiles(Collection<FileStatus> allDelFiles) {
- int i = 0;
- for (FileStatus file : allDelFiles) {
- Assert.assertEquals(delFiles.get(i), file);
- i++;
- }
- }
-
- /**
- * Creates store files.
- * @param basePath the path to create file
- * @family the family name
- * @qualifier the column qualifier
- * @count the store file number
- * @type the key type
- */
- private void createStoreFiles(Path basePath, String family, String qualifier, int count,
- Type type) throws IOException {
- HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
- String startKey = "row_";
- MobFileName mobFileName = null;
- for (int i = 0; i < count; i++) {
- byte[] startRow = Bytes.toBytes(startKey + i) ;
- if(type.equals(Type.Delete)) {
- mobFileName = MobFileName.create(startRow, MobUtils.formatDate(
- new Date()), delSuffix);
- }
- if(type.equals(Type.Put)){
- mobFileName = MobFileName.create(Bytes.toBytes(startKey + i), MobUtils.formatDate(
- new Date()), mobSuffix);
- }
- StoreFileWriter mobFileWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
- .withFileContext(meta).withFilePath(new Path(basePath, mobFileName.getFileName())).build();
- writeStoreFile(mobFileWriter, startRow, Bytes.toBytes(family), Bytes.toBytes(qualifier),
- type, (i+1)*1000);
- }
- }
-
- /**
- * Writes data to store file.
- * @param writer the store file writer
- * @param row the row key
- * @param family the family name
- * @param qualifier the column qualifier
- * @param type the key type
- * @param size the size of value
- */
- private static void writeStoreFile(final StoreFileWriter writer, byte[]row, byte[] family,
- byte[] qualifier, Type type, int size) throws IOException {
- long now = System.currentTimeMillis();
- try {
- byte[] dummyData = new byte[size];
- new Random().nextBytes(dummyData);
- writer.append(new KeyValue(row, family, qualifier, now, type, dummyData));
- } finally {
- writer.close();
- }
- }
-
- /**
- * Gets the number of del cell in the del files
- * @param paths the del file paths
- * @return the cell size
- */
- private int countDelCellsInDelFiles(List<Path> paths) throws IOException {
- List<StoreFile> sfs = new ArrayList<StoreFile>();
- int size = 0;
- for(Path path : paths) {
- StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE);
- sfs.add(sf);
- }
- List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(sfs, false, true,
- false, false, HConstants.LATEST_TIMESTAMP);
- Scan scan = new Scan();
- scan.setMaxVersions(hcd.getMaxVersions());
- long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
- long ttl = HStore.determineTTLFromFamily(hcd);
- ScanInfo scanInfo = new ScanInfo(conf, hcd, ttl, timeToPurgeDeletes, CellComparator.COMPARATOR);
- StoreScanner scanner = new StoreScanner(scan, scanInfo, ScanType.COMPACT_RETAIN_DELETES, null,
- scanners, 0L, HConstants.LATEST_TIMESTAMP);
- List<Cell> results = new ArrayList<>();
- boolean hasMore = true;
-
- while (hasMore) {
- hasMore = scanner.next(results);
- size += results.size();
- results.clear();
- }
- scanner.close();
- return size;
- }
-
- private static ExecutorService createThreadPool() {
- int maxThreads = 10;
- long keepAliveTime = 60;
- final SynchronousQueue<Runnable> queue = new SynchronousQueue<Runnable>();
- ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime,
- TimeUnit.SECONDS, queue, Threads.newDaemonThreadFactory("MobFileCompactionChore"),
- new RejectedExecutionHandler() {
- @Override
- public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
- try {
- // waiting for a thread to pick up instead of throwing exceptions.
- queue.put(r);
- } catch (InterruptedException e) {
- throw new RejectedExecutionException(e);
- }
- }
- });
- ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
- return pool;
- }
-
- /**
- * Resets the configuration.
- */
- private void resetConf() {
- conf.setLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD,
- MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD);
- conf.setInt(MobConstants.MOB_DELFILE_MAX_COUNT, MobConstants.DEFAULT_MOB_DELFILE_MAX_COUNT);
- conf.setInt(MobConstants.MOB_COMPACTION_BATCH_SIZE,
- MobConstants.DEFAULT_MOB_COMPACTION_BATCH_SIZE);
- }
-}
\ No newline at end of file