From 023a6a4ca7da1ddbe53f3f6748c05935e3594502 Mon Sep 17 00:00:00 2001
From: Josh Elser
Date: Thu, 8 Jun 2017 18:08:02 -0400
Subject: [PATCH] HBASE-18135 Implement mechanism for RegionServers to report file archival for space quotas

This decouples the snapshot size calculation from the SpaceQuotaObserverChore into a
separate API which both the periodically invoked Master chore and the Master service
endpoint can invoke. This allows snapshot sizes to be reported from the multiple
sources we have in HBase. When a file is archived, snapshot sizes can be realized
more quickly, and the Master can still perform periodic computations of the total
snapshot size to account for any delayed, missing, or lost file archival RPCs.
---
 .../hbase/client/ConnectionImplementation.java     |    2 +
 .../apache/hadoop/hbase/quotas/QuotaTableUtil.java |    6 +-
 .../shaded/protobuf/generated/ClientProtos.java    |    4 +-
 .../generated/RegionServerStatusProtos.java        | 2130 +++++++++++++++++++-
 .../src/main/protobuf/RegionServerStatus.proto     |   16 +
 .../org/apache/hadoop/hbase/master/HMaster.java    |    4 +
 .../hadoop/hbase/master/MasterRpcServices.java     |   18 +
 .../hadoop/hbase/quotas/FileArchiverNotifier.java  |   50 +
 .../hbase/quotas/FileArchiverNotifierFactory.java  |   33 +
 .../quotas/FileArchiverNotifierFactoryImpl.java    |  111 +
 .../hbase/quotas/FileArchiverNotifierImpl.java     |  616 ++++++
 .../hadoop/hbase/quotas/MasterQuotaManager.java    |   30 +
 .../quotas/RegionServerSpaceQuotaManager.java      |   33 +
 .../hbase/quotas/SnapshotQuotaObserverChore.java   |  353 +---
 .../hadoop/hbase/regionserver/HRegionServer.java   |   35 +
 .../apache/hadoop/hbase/regionserver/HStore.java   |   49 +
 .../hbase/regionserver/RegionServerServices.java   |   13 +
 .../hadoop/hbase/MockRegionServerServices.java     |    8 +
 .../hadoop/hbase/master/MockRegionServer.java      |    8 +
 .../hbase/quotas/TestFileArchiverNotifierImpl.java |  307 +++
 .../hbase/quotas/TestLowLatencySpaceQuotas.java    |   83 +-
 .../quotas/TestSnapshotQuotaObserverChore.java     |  222 +-
 22 files changed, 3683 insertions(+), 448 deletions(-)
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifier.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactory.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileArchiverNotifierImpl.java

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 71b0bb3011..e797d10527 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -96,6 +96,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaSta
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java index 0da3f779a8..b1214fc585 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java @@ -73,13 +73,13 @@ import org.apache.hadoop.hbase.util.Strings; /** * Helper class to interact with the quota table. * - * + * * * - * + * * * - * + * * * * diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java index b93f6cc7e0..96652b413e 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java @@ -22985,7 +22985,7 @@ public final class ClientProtos { * optional .hbase.pb.Cursor cursor = 12; */ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CursorOrBuilder> + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CursorOrBuilder> getCursorFieldBuilder() { if (cursorBuilder_ == null) { cursorBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< @@ -41831,7 +41831,7 @@ public final class ClientProtos { internal_static_hbase_pb_ScanRequest_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_Cursor_descriptor; - private static final + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_Cursor_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java index 299b55e863..d0967efa7a 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java @@ -10728,6 +10728,1975 @@ public final class RegionServerStatusProtos { } + public interface FileArchiveNotificationRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.FileArchiveNotificationRequest) + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + java.util.List + getArchivedFilesList(); + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize getArchivedFiles(int index); + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + int getArchivedFilesCount(); + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + java.util.List + getArchivedFilesOrBuilderList(); + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSizeOrBuilder getArchivedFilesOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.FileArchiveNotificationRequest} + */ + public static final class FileArchiveNotificationRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.FileArchiveNotificationRequest) + FileArchiveNotificationRequestOrBuilder { + // Use FileArchiveNotificationRequest.newBuilder() to construct. + private FileArchiveNotificationRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private FileArchiveNotificationRequest() { + archivedFiles_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private FileArchiveNotificationRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + archivedFiles_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + archivedFiles_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.PARSER, extensionRegistry)); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + archivedFiles_ = java.util.Collections.unmodifiableList(archivedFiles_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static 
final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.Builder.class); + } + + public interface FileWithSizeOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.FileArchiveNotificationRequest.FileWithSize) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * optional .hbase.pb.TableName table_name = 1; + */ + boolean hasTableName(); + /** + * optional .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * optional .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + /** + * optional string name = 2; + */ + boolean hasName(); + /** + * optional string name = 2; + */ + java.lang.String getName(); + /** + * optional string name = 2; + */ + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getNameBytes(); + + /** + * optional uint64 size = 3; + */ + boolean hasSize(); + /** + * optional uint64 size = 3; + */ + long getSize(); + } + /** + * Protobuf type {@code hbase.pb.FileArchiveNotificationRequest.FileWithSize} + */ + public static final class FileWithSize extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.FileArchiveNotificationRequest.FileWithSize) + FileWithSizeOrBuilder { + // Use FileWithSize.newBuilder() to construct. 
+ private FileWithSize(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private FileWithSize() { + name_ = ""; + size_ = 0L; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private FileWithSize( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000002; + name_ = bs; + break; + } + case 24: { + bitField0_ |= 0x00000004; + size_ = input.readUInt64(); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationRequest_FileWithSize_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationRequest_FileWithSize_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.Builder.class); + } + + private int bitField0_; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_; + } + + public static final int NAME_FIELD_NUMBER = 2; + private volatile java.lang.Object name_; + /** + * optional string name = 2; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string name = 2; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = + (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * optional string name = 2; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + } + } + + public static final int SIZE_FIELD_NUMBER = 3; + private long size_; + /** + * optional uint64 size = 3; + */ + public boolean hasSize() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 size = 3; + */ + public long getSize() { + return size_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasTableName()) { + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, getTableName()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, size_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getTableName()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, size_); + } + size += unknownFields.getSerializedSize(); + 
memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && (hasSize() == other.hasSize()); + if (hasSize()) { + result = result && (getSize() + == other.getSize()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + if (hasSize()) { + hash = (37 * hash) + SIZE_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong( + getSize()); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize parseFrom(java.io.InputStream input) + throws 
java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.FileArchiveNotificationRequest.FileWithSize} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.FileArchiveNotificationRequest.FileWithSize) + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSizeOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationRequest_FileWithSize_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationRequest_FileWithSize_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = null; + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + size_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationRequest_FileWithSize_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.size_ = size_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasName()) { + bitField0_ |= 0x00000002; + name_ = other.name_; + onChanged(); + } + if (other.hasSize()) { + setSize(other.getSize()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (hasTableName()) { + if (!getTableName().isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != null && + tableName_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = null; + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder 
getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_ == null ? + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_; + } + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + getTableName(), + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + private java.lang.Object name_ = ""; + /** + * optional string name = 2; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string name = 2; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = + (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string name = 2; + */ + public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; + } + } + /** + * optional string name = 2; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + name_ = value; + onChanged(); + return this; + } + /** + * optional string name = 2; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000002); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * optional string name = 2; + */ + public Builder setNameBytes( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + name_ = value; + onChanged(); + return this; + } + + private long size_ ; + /** + * optional uint64 size = 3; + */ + public boolean hasSize() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 size = 3; + */ + public long getSize() { + return size_; + } + /** + * optional uint64 size = 3; + */ + public Builder 
setSize(long value) { + bitField0_ |= 0x00000004; + size_ = value; + onChanged(); + return this; + } + /** + * optional uint64 size = 3; + */ + public Builder clearSize() { + bitField0_ = (bitField0_ & ~0x00000004); + size_ = 0L; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.FileArchiveNotificationRequest.FileWithSize) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.FileArchiveNotificationRequest.FileWithSize) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public FileWithSize parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new FileWithSize(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public static final int ARCHIVED_FILES_FIELD_NUMBER = 1; + private java.util.List archivedFiles_; + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public java.util.List getArchivedFilesList() { + return archivedFiles_; + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public java.util.List + getArchivedFilesOrBuilderList() { + return archivedFiles_; + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public int getArchivedFilesCount() { + return archivedFiles_.size(); + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize getArchivedFiles(int index) { + return archivedFiles_.get(index); + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSizeOrBuilder getArchivedFilesOrBuilder( + int 
index) { + return archivedFiles_.get(index); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + for (int i = 0; i < getArchivedFilesCount(); i++) { + if (!getArchivedFiles(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < archivedFiles_.size(); i++) { + output.writeMessage(1, archivedFiles_.get(i)); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < archivedFiles_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, archivedFiles_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest) obj; + + boolean result = true; + result = result && getArchivedFilesList() + .equals(other.getArchivedFilesList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getArchivedFilesCount() > 0) { + hash = (37 * hash) + ARCHIVED_FILES_FIELD_NUMBER; + hash = (53 * hash) + getArchivedFilesList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest parseFrom( + byte[] data, + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.FileArchiveNotificationRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.FileArchiveNotificationRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequestOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getArchivedFilesFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (archivedFilesBuilder_ == null) { + archivedFiles_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + archivedFilesBuilder_.clear(); + } + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationRequest_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest result = new 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest(this); + int from_bitField0_ = bitField0_; + if (archivedFilesBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + archivedFiles_ = java.util.Collections.unmodifiableList(archivedFiles_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.archivedFiles_ = archivedFiles_; + } else { + result.archivedFiles_ = archivedFilesBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.getDefaultInstance()) return this; + if (archivedFilesBuilder_ == null) { + if (!other.archivedFiles_.isEmpty()) { + if (archivedFiles_.isEmpty()) { + archivedFiles_ = other.archivedFiles_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureArchivedFilesIsMutable(); + archivedFiles_.addAll(other.archivedFiles_); + } + onChanged(); + } + } else { + if (!other.archivedFiles_.isEmpty()) { + if (archivedFilesBuilder_.isEmpty()) { + archivedFilesBuilder_.dispose(); + archivedFilesBuilder_ = null; + archivedFiles_ = other.archivedFiles_; + bitField0_ = (bitField0_ & ~0x00000001); + archivedFilesBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getArchivedFilesFieldBuilder() : null; + } else { + archivedFilesBuilder_.addAllMessages(other.archivedFiles_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getArchivedFilesCount(); i++) { + if (!getArchivedFiles(i).isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List archivedFiles_ = + java.util.Collections.emptyList(); + private void ensureArchivedFilesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + archivedFiles_ = new java.util.ArrayList(archivedFiles_); + bitField0_ |= 0x00000001; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSizeOrBuilder> archivedFilesBuilder_; + + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public java.util.List getArchivedFilesList() { + if (archivedFilesBuilder_ == null) { + return java.util.Collections.unmodifiableList(archivedFiles_); + } else { + return archivedFilesBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public int getArchivedFilesCount() { + if (archivedFilesBuilder_ == null) { + return archivedFiles_.size(); + } else { + return archivedFilesBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize getArchivedFiles(int index) { + if (archivedFilesBuilder_ == null) { + return archivedFiles_.get(index); + } else { + return archivedFilesBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public Builder setArchivedFiles( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize value) { + if (archivedFilesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchivedFilesIsMutable(); + archivedFiles_.set(index, value); + onChanged(); + } else { + archivedFilesBuilder_.setMessage(index, value); + } + return this; + } + /** + * 
repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public Builder setArchivedFiles( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.Builder builderForValue) { + if (archivedFilesBuilder_ == null) { + ensureArchivedFilesIsMutable(); + archivedFiles_.set(index, builderForValue.build()); + onChanged(); + } else { + archivedFilesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public Builder addArchivedFiles(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize value) { + if (archivedFilesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchivedFilesIsMutable(); + archivedFiles_.add(value); + onChanged(); + } else { + archivedFilesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public Builder addArchivedFiles( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize value) { + if (archivedFilesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchivedFilesIsMutable(); + archivedFiles_.add(index, value); + onChanged(); + } else { + archivedFilesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public Builder addArchivedFiles( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.Builder builderForValue) { + if (archivedFilesBuilder_ == null) { + ensureArchivedFilesIsMutable(); + archivedFiles_.add(builderForValue.build()); + onChanged(); + } else { + archivedFilesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public Builder addArchivedFiles( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.Builder builderForValue) { + if (archivedFilesBuilder_ == null) { + ensureArchivedFilesIsMutable(); + archivedFiles_.add(index, builderForValue.build()); + onChanged(); + } else { + archivedFilesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public Builder addAllArchivedFiles( + java.lang.Iterable values) { + if (archivedFilesBuilder_ == null) { + ensureArchivedFilesIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, archivedFiles_); + onChanged(); + } else { + archivedFilesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public Builder clearArchivedFiles() { + if (archivedFilesBuilder_ == null) { + archivedFiles_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + archivedFilesBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public 
Builder removeArchivedFiles(int index) { + if (archivedFilesBuilder_ == null) { + ensureArchivedFilesIsMutable(); + archivedFiles_.remove(index); + onChanged(); + } else { + archivedFilesBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.Builder getArchivedFilesBuilder( + int index) { + return getArchivedFilesFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSizeOrBuilder getArchivedFilesOrBuilder( + int index) { + if (archivedFilesBuilder_ == null) { + return archivedFiles_.get(index); } else { + return archivedFilesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public java.util.List + getArchivedFilesOrBuilderList() { + if (archivedFilesBuilder_ != null) { + return archivedFilesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(archivedFiles_); + } + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.Builder addArchivedFilesBuilder() { + return getArchivedFilesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.getDefaultInstance()); + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.Builder addArchivedFilesBuilder( + int index) { + return getArchivedFilesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.getDefaultInstance()); + } + /** + * repeated .hbase.pb.FileArchiveNotificationRequest.FileWithSize archived_files = 1; + */ + public java.util.List + getArchivedFilesBuilderList() { + return getArchivedFilesFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSizeOrBuilder> + getArchivedFilesFieldBuilder() { + if (archivedFilesBuilder_ == null) { + archivedFilesBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSizeOrBuilder>( + archivedFiles_, + ((bitField0_ 
& 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + archivedFiles_ = null; + } + return archivedFilesBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.FileArchiveNotificationRequest) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.FileArchiveNotificationRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public FileArchiveNotificationRequest parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new FileArchiveNotificationRequest(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface FileArchiveNotificationResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.FileArchiveNotificationResponse) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.FileArchiveNotificationResponse} + */ + public static final class FileArchiveNotificationResponse extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.FileArchiveNotificationResponse) + FileArchiveNotificationResponseOrBuilder { + // Use FileArchiveNotificationResponse.newBuilder() to construct. 
+ private FileArchiveNotificationResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private FileArchiveNotificationResponse() { + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private FileArchiveNotificationResponse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.Builder.class); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse) 
obj; + + boolean result = true; + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse parseFrom( + 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.FileArchiveNotificationResponse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.FileArchiveNotificationResponse) + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + public Builder clear() { + super.clear(); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_FileArchiveNotificationResponse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse(this); + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.FileArchiveNotificationResponse) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.FileArchiveNotificationResponse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public FileArchiveNotificationResponse parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new FileArchiveNotificationResponse(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + /** * Protobuf service {@code hbase.pb.RegionServerStatusService} */ @@ -10816,6 +12785,18 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + *
+       * <pre>
+       ** Reports files that were moved to the archive directory for space quotas
+       * </pre>
+ * + * rpc ReportFileArchival(.hbase.pb.FileArchiveNotificationRequest) returns (.hbase.pb.FileArchiveNotificationResponse); + */ + public abstract void reportFileArchival( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService( @@ -10869,6 +12850,14 @@ public final class RegionServerStatusProtos { impl.reportRegionSpaceUse(controller, request, done); } + @java.lang.Override + public void reportFileArchival( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + impl.reportFileArchival(controller, request, done); + } + }; } @@ -10903,6 +12892,8 @@ public final class RegionServerStatusProtos { return impl.reportRegionStateTransition(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest)request); case 5: return impl.reportRegionSpaceUse(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest)request); + case 6: + return impl.reportFileArchival(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -10929,6 +12920,8 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.getDefaultInstance(); case 5: return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -10955,6 +12948,8 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance(); case 5: return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -11043,6 +13038,18 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + *
+     * <pre>
+     ** Reports files that were moved to the archive directory for space quotas
+     * </pre>
+ * + * rpc ReportFileArchival(.hbase.pb.FileArchiveNotificationRequest) returns (.hbase.pb.FileArchiveNotificationResponse); + */ + public abstract void reportFileArchival( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -11095,6 +13102,11 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 6: + this.reportFileArchival(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest)request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -11121,6 +13133,8 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.getDefaultInstance(); case 5: return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -11147,6 +13161,8 @@ public final class RegionServerStatusProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.getDefaultInstance(); case 5: return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -11257,6 +13273,21 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance())); } + + public void reportFileArchival( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.getDefaultInstance(), + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.class, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -11294,6 +13325,11 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController 
controller, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse reportFileArchival( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -11374,6 +13410,18 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse reportFileArchival( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.RegionServerStatusService) @@ -11449,6 +13497,21 @@ public final class RegionServerStatusProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_RegionSpaceUseReportResponse_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_FileArchiveNotificationRequest_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_FileArchiveNotificationRequest_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_FileArchiveNotificationRequest_FileWithSize_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_FileArchiveNotificationRequest_FileWithSize_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_FileArchiveNotificationResponse_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_FileArchiveNotificationResponse_fieldAccessorTable; public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -11494,26 +13557,35 @@ public final class RegionServerStatusProtos { "2\024.hbase.pb.RegionInfo\022\023\n\013region_size\030\002 " + "\001(\004\"J\n\033RegionSpaceUseReportRequest\022+\n\tsp" + "ace_use\030\001 \003(\0132\030.hbase.pb.RegionSpaceUse\"" + - "\036\n\034RegionSpaceUseReportResponse2\227\005\n\031Regi" + - 
"onServerStatusService\022b\n\023RegionServerSta" + - "rtup\022$.hbase.pb.RegionServerStartupReque" + - "st\032%.hbase.pb.RegionServerStartupRespons", - "e\022_\n\022RegionServerReport\022#.hbase.pb.Regio" + - "nServerReportRequest\032$.hbase.pb.RegionSe" + - "rverReportResponse\022_\n\022ReportRSFatalError" + - "\022#.hbase.pb.ReportRSFatalErrorRequest\032$." + - "hbase.pb.ReportRSFatalErrorResponse\022q\n\030G" + - "etLastFlushedSequenceId\022).hbase.pb.GetLa" + - "stFlushedSequenceIdRequest\032*.hbase.pb.Ge" + - "tLastFlushedSequenceIdResponse\022z\n\033Report" + - "RegionStateTransition\022,.hbase.pb.ReportR" + - "egionStateTransitionRequest\032-.hbase.pb.R", - "eportRegionStateTransitionResponse\022e\n\024Re" + - "portRegionSpaceUse\022%.hbase.pb.RegionSpac" + - "eUseReportRequest\032&.hbase.pb.RegionSpace" + - "UseReportResponseBU\n1org.apache.hadoop.h" + - "base.shaded.protobuf.generatedB\030RegionSe" + - "rverStatusProtosH\001\210\001\001\240\001\001" + "\036\n\034RegionSpaceUseReportResponse\"\304\001\n\036File" + + "ArchiveNotificationRequest\022M\n\016archived_f" + + "iles\030\001 \003(\01325.hbase.pb.FileArchiveNotific" + + "ationRequest.FileWithSize\032S\n\014FileWithSiz", + "e\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb.TableNa" + + "me\022\014\n\004name\030\002 \001(\t\022\014\n\004size\030\003 \001(\004\"!\n\037FileAr" + + "chiveNotificationResponse2\202\006\n\031RegionServ" + + "erStatusService\022b\n\023RegionServerStartup\022$" + + ".hbase.pb.RegionServerStartupRequest\032%.h" + + "base.pb.RegionServerStartupResponse\022_\n\022R" + + "egionServerReport\022#.hbase.pb.RegionServe" + + "rReportRequest\032$.hbase.pb.RegionServerRe" + + "portResponse\022_\n\022ReportRSFatalError\022#.hba" + + "se.pb.ReportRSFatalErrorRequest\032$.hbase.", + "pb.ReportRSFatalErrorResponse\022q\n\030GetLast" + + "FlushedSequenceId\022).hbase.pb.GetLastFlus" + + "hedSequenceIdRequest\032*.hbase.pb.GetLastF" + + "lushedSequenceIdResponse\022z\n\033ReportRegion" + + "StateTransition\022,.hbase.pb.ReportRegionS" + + "tateTransitionRequest\032-.hbase.pb.ReportR" + + "egionStateTransitionResponse\022e\n\024ReportRe" + + "gionSpaceUse\022%.hbase.pb.RegionSpaceUseRe" + + "portRequest\032&.hbase.pb.RegionSpaceUseRep" + + "ortResponse\022i\n\022ReportFileArchival\022(.hbas", + "e.pb.FileArchiveNotificationRequest\032).hb" + + "ase.pb.FileArchiveNotificationResponseBU" + + "\n1org.apache.hadoop.hbase.shaded.protobu" + + "f.generatedB\030RegionServerStatusProtosH\001\210" + + "\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -11613,6 +13685,24 @@ public final class RegionServerStatusProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor, new java.lang.String[] { }); + internal_static_hbase_pb_FileArchiveNotificationRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_hbase_pb_FileArchiveNotificationRequest_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_FileArchiveNotificationRequest_descriptor, + new java.lang.String[] { "ArchivedFiles", }); + internal_static_hbase_pb_FileArchiveNotificationRequest_FileWithSize_descriptor = + internal_static_hbase_pb_FileArchiveNotificationRequest_descriptor.getNestedTypes().get(0); + internal_static_hbase_pb_FileArchiveNotificationRequest_FileWithSize_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_FileArchiveNotificationRequest_FileWithSize_descriptor, + new java.lang.String[] { "TableName", "Name", "Size", }); + internal_static_hbase_pb_FileArchiveNotificationResponse_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_hbase_pb_FileArchiveNotificationResponse_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_FileArchiveNotificationResponse_descriptor, + new java.lang.String[] { }); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.getDescriptor(); } diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto index 7d35df019d..62897b8618 100644 --- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto @@ -143,6 +143,18 @@ message RegionSpaceUseReportResponse { } +message FileArchiveNotificationRequest { + message FileWithSize { + optional TableName table_name = 1; + optional string name = 2; + optional uint64 size = 3; + } + repeated FileWithSize archived_files = 1; +} + +message FileArchiveNotificationResponse { +} + service RegionServerStatusService { /** Called when a region server first starts. 
*/ rpc RegionServerStartup(RegionServerStartupRequest) @@ -178,4 +190,8 @@ service RegionServerStatusService { */ rpc ReportRegionSpaceUse(RegionSpaceUseReportRequest) returns(RegionSpaceUseReportResponse); + + /** Reports files that were moved to the archive directory for space quotas */ + rpc ReportFileArchival(FileArchiveNotificationRequest) + returns(FileArchiveNotificationResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index aa0d80368f..5de23204e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -3482,4 +3482,8 @@ public class HMaster extends HRegionServer implements MasterServices { public SpaceQuotaSnapshotNotifier getSpaceQuotaSnapshotNotifier() { return this.spaceQuotaSnapshotNotifier; } + + public SnapshotQuotaObserverChore getSnapshotQuotaObserverChore() { + return this.snapshotQuotaChore; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index c6397f36b0..3031d6dbb9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -122,6 +122,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuo import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.NamespaceQuotaSnapshot; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.TableQuotaSnapshot; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; @@ -2039,4 +2041,20 @@ public class MasterRpcServices extends RSRpcServices throw new ServiceException(ioe); } } + + @Override + public FileArchiveNotificationResponse reportFileArchival(RpcController controller, + FileArchiveNotificationRequest request) throws ServiceException { + try { + master.checkInitialized(); + if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) { + return FileArchiveNotificationResponse.newBuilder().build(); + } + master.getMasterQuotaManager().processFileArchivals(request, master.getConnection(), + master.getConfiguration(), master.getFileSystem()); + return FileArchiveNotificationResponse.newBuilder().build(); + } catch (Exception e) { + throw new ServiceException(e); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifier.java new file mode 100644 index 0000000000..79bc0181f8 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifier.java @@ -0,0 +1,50 @@ +/* + * Licensed to the 
Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import java.io.IOException; +import java.util.Collection; +import java.util.Set; +import java.util.Map.Entry; + +/** + * Interface allowing various implementations of tracking files that have recently been archived to + * allow for the Master to notice changes to snapshot sizes for space quotas. + * + * This object needs to ensure that {@link #addArchivedFiles(Set)} and + * {@link #computeAndStoreSnapshotSizes(Collection)} are mutually exclusive. If a "full" computation + * is in progress, new changes being archived should be held. + */ +public interface FileArchiverNotifier { + + /** + * Records a file and its size in bytes being moved to the archive directory. + * + * @param fileSizes A collection of file name to size in bytes + * @throws IOException If there was an IO-related error persisting the file size(s) + */ + void addArchivedFiles(Set> fileSizes) throws IOException; + + /** + * Computes the size of a table and all of its snapshots, recording new "full" sizes for each. + * + * @param currentSnapshots the current list of snapshots against this table + * @return The total size of all snapshots against this table. + * @throws IOException If there was an IO-related error computing or persisting the sizes. + */ + long computeAndStoreSnapshotSizes(Collection currentSnapshots) throws IOException; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactory.java new file mode 100644 index 0000000000..2af8175b50 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactory.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.quotas; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; + +/** + * Factory class to create {@link FileArchiverNotifier} instances. + */ +public interface FileArchiverNotifierFactory { + + /** + * Creates or obtains a {@link FileArchiverNotifier} instance for the given args. + */ + FileArchiverNotifier get(Connection conn, Configuration conf, FileSystem fs, TableName tn); +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java new file mode 100644 index 0000000000..e911df471e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.quotas; + +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; + +import com.google.common.annotations.VisibleForTesting; + +/** + * A factory for getting instances of {@link FileArchiverNotifier}. + */ +public class FileArchiverNotifierFactoryImpl implements FileArchiverNotifierFactory { + private static final FileArchiverNotifierFactoryImpl DEFAULT_INSTANCE = new FileArchiverNotifierFactoryImpl(); + private static volatile FileArchiverNotifierFactory CURRENT_INSTANCE = DEFAULT_INSTANCE; + private final ConcurrentHashMap CACHE; + + private FileArchiverNotifierFactoryImpl() { + CACHE = new ConcurrentHashMap<>(); + } + + public static FileArchiverNotifierFactory getInstance() { + return CURRENT_INSTANCE; + } + + @VisibleForTesting + static void setInstance(FileArchiverNotifierFactory inst) { + CURRENT_INSTANCE = Objects.requireNonNull(inst); + } + + @VisibleForTesting + static void reset() { + CURRENT_INSTANCE = DEFAULT_INSTANCE; + } + + /** + * Returns the {@link FileArchiverNotifier} instance for the given {@link TableName}. + * + * @param tn The table to obtain a notifier for + * @return The notifier for the given {@code tablename}. 
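+   *         Instances are cached per table, so repeated calls with the same {@link TableName}
+   *         return the same shared notifier. Hypothetical usage (variable names are illustrative):
+   *         <pre>{@code
+   *         FileArchiverNotifier notifier = FileArchiverNotifierFactoryImpl.getInstance()
+   *             .get(conn, conf, fs, tn);
+   *         notifier.addArchivedFiles(archivedFileSizes);
+   *         }</pre>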
+ */ + public FileArchiverNotifier get( + Connection conn, Configuration conf, FileSystem fs, TableName tn) { + // Ensure that only one instance is exposed to callers + final FileArchiverNotifier newMapping = new FileArchiverNotifierImpl(conn, conf, fs, tn); + final FileArchiverNotifier previousMapping = CACHE.putIfAbsent(tn, newMapping); + if (previousMapping == null) { + return newMapping; + } + return previousMapping; + } + + public int getCacheSize() { + return CACHE.size(); + } + + static class CacheKey { + final Connection conn; + final Configuration conf; + final FileSystem fs; + final TableName tn; + + CacheKey(Connection conn, Configuration conf, FileSystem fs, TableName tn) { + this.conn = conn; + this.conf = conf; + this.fs = fs; + this.tn = tn; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof CacheKey)) { + return false; + } + CacheKey other = (CacheKey) o; + // TableName should be the only thing differing.. + return tn.equals(other.tn) && conn.equals(other.conn) && conf.equals(other.conf) + && fs.equals(other.fs); + } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(conn).append(conf).append(fs).append(tn).toHashCode(); + } + + @Override + public String toString() { + return "CacheKey[TableName=" + tn + "]"; + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java new file mode 100644 index 0000000000..3964c606e0 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java @@ -0,0 +1,616 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.quotas; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.HFileArchiveUtil; +import org.apache.hadoop.util.StringUtils; + +import com.google.common.collect.HashMultimap; +import com.google.common.collect.Multimap; + +/** + * Tracks file archiving and updates the hbase quota table. + */ +public class FileArchiverNotifierImpl implements FileArchiverNotifier { + private static final Log LOG = LogFactory.getLog(FileArchiverNotifierImpl.class); + private final Connection conn; + private final Configuration conf; + private final FileSystem fs; + private final TableName tn; + private final ReadLock readLock; + private final WriteLock writeLock; + private volatile long lastFullCompute = Long.MIN_VALUE; + private List currentSnapshots = Collections.emptyList(); + private static final Map NAMESPACE_LOCKS = new HashMap<>(); + + public FileArchiverNotifierImpl(Connection conn, Configuration conf, FileSystem fs, TableName tn) { + this.conn = conn; + this.conf = conf; + this.fs = fs; + this.tn = tn; + ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + readLock = lock.readLock(); + writeLock = lock.writeLock(); + } + + static synchronized Object getLockForNamespace(String namespace) { + return NAMESPACE_LOCKS.computeIfAbsent(namespace, (ns) -> new Object()); + } + + /** + * Returns a strictly-increasing measure of time extracted by {@link System#nanoTime()}. 
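+   * A value of {@link Long#MIN_VALUE} indicates that no full computation has completed yet.
+   * Per the {@code System.nanoTime()} contract, values should be compared by subtraction
+   * (for example {@code start - lastFullCompute < 0}) rather than by direct comparison, to
+   * avoid problems with numerical overflow.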
+ */ + long getLastFullCompute() { + return lastFullCompute; + } + + @Override + public void addArchivedFiles(Set> fileSizes) throws IOException { + long start = System.nanoTime(); + readLock.lock(); + try { + // We want to catch the case where we got an archival request, but there was a full + // re-computation in progress that was blocking us. Most likely, the full computation is going + // to already include the changes we were going to make. + // + // Same as "start < lastFullCompute" but avoiding numeric overflow per the + // System.nanoTime() javadoc + if (lastFullCompute != Long.MIN_VALUE && start - lastFullCompute < 0) { + if (LOG.isTraceEnabled()) { + LOG.trace("A full computation was performed after this request was received." + + " Ignoring requested updates: " + fileSizes); + } + return; + } + + if (LOG.isTraceEnabled()) { + LOG.trace("currentSnapshots: " + currentSnapshots + " fileSize: "+ fileSizes); + } + + // Write increment to quota table for the correct snapshot. Only do this if we have snapshots + // and some files that were archived. + if (!currentSnapshots.isEmpty() && !fileSizes.isEmpty()) { + // We get back the files which no snapshot referenced (the files which will be deleted soon) + groupArchivedFiledBySnapshotAndRecordSize(currentSnapshots, fileSizes); + } + } finally { + readLock.unlock(); + } + } + + /** + * For each file in the map, this updates the first snapshot (lexicographic snapshot name) that + * references this file. + * + * @returns A Map of files and their sizes which none of the {@code snapshots} referred to. + */ + void groupArchivedFiledBySnapshotAndRecordSize( + List snapshots, Set> fileSizes) throws IOException { + // Make a copy as we'll modify it. + final Map filesToUpdate = new HashMap<>(fileSizes.size()); + for (Entry entry : fileSizes) { + filesToUpdate.put(entry.getKey(), entry.getValue()); + } + // Track the change in size to each snapshot + final Map snapshotSizeChanges = new HashMap<>(); + for (String snapshot : snapshots) { + // For each file in `filesToUpdate`, check if `snapshot` refers to it. + // If `snapshot` does, remove it from `filesToUpdate` and add it to `snapshotSizeChanges`. + bucketFilesToSnapshot(snapshot, filesToUpdate, snapshotSizeChanges); + if (filesToUpdate.isEmpty()) { + // If we have no more files recently archived, we have nothing more to check + break; + } + } + // We have computed changes to the snapshot size, we need to record them. + if (!snapshotSizeChanges.isEmpty()) { + if (LOG.isTraceEnabled()) { + LOG.trace("Writing snapshot size changes for: " + snapshotSizeChanges); + } + persistSnapshotSizeChanges(snapshotSizeChanges); + } + } + + /** + * For the given snapshot, find all files which this {@code snapshotName} references. After a file + * is found to be referenced by the snapshot, it is removed from {@code filesToUpdate} and + * {@code snapshotSizeChanges} is updated in concert. + * + * @param snapshotName The snapshot to check + * @param filesToUpdate A mapping of archived files to their size + * @param snapshotSizeChanges A mapping of snapshots and their change in size + */ + void bucketFilesToSnapshot( + String snapshotName, Map filesToUpdate, Map snapshotSizeChanges) + throws IOException { + // A quick check to avoid doing work if the caller unnecessarily invoked this method. 
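The overflow-safe time check in addArchivedFiles(..) above is easy to misread, so here is a minimal standalone sketch of the idiom (plain Java, not part of the patch; the method and field names are placeholders chosen to mirror the patch):

public class NanoTimeOrdering {
  /** Returns true if {@code requestStart} happened before {@code lastFullCompute}. */
  static boolean supersededByFullCompute(long requestStart, long lastFullCompute) {
    if (lastFullCompute == Long.MIN_VALUE) {
      return false; // no full computation has happened yet
    }
    // Same as "requestStart < lastFullCompute", but safe if System.nanoTime() wraps around
    return requestStart - lastFullCompute < 0;
  }

  public static void main(String[] args) throws InterruptedException {
    long requestStart = System.nanoTime();
    Thread.sleep(5);
    long lastFullCompute = System.nanoTime();
    // The archival request predates the full recomputation, so its deltas can be dropped.
    System.out.println(supersededByFullCompute(requestStart, lastFullCompute)); // true
  }
}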
+ if (filesToUpdate.isEmpty()) { + return; + } + + Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir( + snapshotName, FSUtils.getRootDir(conf)); + SnapshotDescription sd = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd); + // For each region referenced by the snapshot + for (SnapshotRegionManifest rm : manifest.getRegionManifests()) { + // For each column family in this region + for (FamilyFiles ff : rm.getFamilyFilesList()) { + // And each store file in that family + for (StoreFile sf : ff.getStoreFilesList()) { + Long valueOrNull = filesToUpdate.remove(sf.getName()); + if (valueOrNull != null) { + // This storefile was recently archived, we should update this snapshot with its size + snapshotSizeChanges.merge(snapshotName, valueOrNull, Long::sum); + } + // Short-circuit, if we have no more files that were archived, we don't need to iterate + // over the rest of the snapshot. + if (filesToUpdate.isEmpty()) { + return; + } + } + } + } + } + + /** + * Reads the current size for each snapshot to update, generates a new update based on that value, + * and then writes the new update. + * + * @param snapshotSizeChanges A map of snapshot name to size change + */ + void persistSnapshotSizeChanges(Map snapshotSizeChanges) throws IOException { + try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { + // Create a list (with a more typical ordering implied) + final List> snapshotSizeEntries = new ArrayList<>( + snapshotSizeChanges.entrySet()); + // Create the Gets for each snapshot we need to update + final List snapshotSizeGets = snapshotSizeEntries.stream() + .map((e) -> QuotaTableUtil.makeGetForSnapshotSize(tn, e.getKey())) + .collect(Collectors.toList()); + final Iterator> iterator = snapshotSizeEntries.iterator(); + // A List to store each Put we'll create from the Get's we retrieve + final List updates = new ArrayList<>(snapshotSizeEntries.size()); + + // TODO Push this down to the RegionServer with a coprocessor: + // + // We would really like to piggy-back on the row-lock already being grabbed + // to handle the update of the row in the quota table. However, because the value + // is a serialized protobuf, the standard Increment API doesn't work for us. With a CP, we + // can just send the size deltas to the RS and atomically update the serialized PB object + // while relying on the row-lock for synchronization. + // + // Synchronizing on the namespace string is a "minor smell" but passable as this is + // only invoked via a single caller (the active Master). Using the namespace name lets us + // have some parallelism without worry of on caller seeing stale data from the quota table. + synchronized (getLockForNamespace(tn.getNamespaceAsString())) { + final Result[] existingSnapshotSizes = quotaTable.get(snapshotSizeGets); + long totalSizeChange = 0; + // Read the current size values (if they exist) to generate the new value + for (Result result : existingSnapshotSizes) { + Entry entry = iterator.next(); + String snapshot = entry.getKey(); + Long size = entry.getValue(); + // Track the total size change for the namespace this table belongs in + totalSizeChange += size; + // Get the size of the previous value (or zero) + long previousSize = getSnapshotSizeFromResult(result); + // Create an update. A file was archived from the table, so the table's size goes + // down, but the snapshot's size goes up. 
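A standalone sketch of the attribution rule implemented by bucketFilesToSnapshot(..): snapshots are visited in sorted (lexicographic) order and the first snapshot that references an archived file absorbs that file's size. Snapshot manifests are reduced here to an in-memory map of snapshot name to store file names; the real code reads them via SnapshotManifest. This is illustrative only, under those simplifying assumptions.

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public class BucketArchivedFiles {
  static Map<String, Long> bucket(
      TreeMap<String, Set<String>> manifests, Map<String, Long> archivedFiles) {
    Map<String, Long> snapshotSizeChanges = new HashMap<>();
    Map<String, Long> filesToUpdate = new HashMap<>(archivedFiles);
    // TreeMap iteration order stands in for the Collections.sort(..) over snapshot names
    for (Map.Entry<String, Set<String>> snapshot : manifests.entrySet()) {
      filesToUpdate.entrySet().removeIf(file -> {
        if (snapshot.getValue().contains(file.getKey())) {
          // Mirrors snapshotSizeChanges.merge(snapshotName, size, Long::sum) in the patch
          snapshotSizeChanges.merge(snapshot.getKey(), file.getValue(), Long::sum);
          return true; // claimed: no later snapshot may count this file again
        }
        return false;
      });
      if (filesToUpdate.isEmpty()) {
        break; // everything attributed, stop early
      }
    }
    return snapshotSizeChanges;
  }

  public static void main(String[] args) {
    TreeMap<String, Set<String>> manifests = new TreeMap<>();
    manifests.put("snap-a", new HashSet<>(Arrays.asList("hfile1", "hfile2")));
    manifests.put("snap-b", new HashSet<>(Arrays.asList("hfile2", "hfile3")));
    Map<String, Long> archived = new HashMap<>();
    archived.put("hfile2", 1024L);
    archived.put("hfile3", 2048L);
    // hfile2 is credited to snap-a (first snapshot in sorted order), hfile3 to snap-b
    System.out.println(bucket(manifests, archived)); // {snap-a=1024, snap-b=2048}
  }
}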
+ updates.add(QuotaTableUtil.createPutForSnapshotSize(tn, snapshot, previousSize + size)); + } + + // Create an update for the summation of all snapshots in the namespace + if (totalSizeChange != 0) { + long previousSize = getPreviousNamespaceSnapshotSize( + quotaTable, tn.getNamespaceAsString()); + updates.add(QuotaTableUtil.createPutForNamespaceSnapshotSize( + tn.getNamespaceAsString(), previousSize + totalSizeChange)); + } + + // Send all of the quota table updates in one batch. + List failures = new ArrayList<>(); + final Object[] results = new Object[updates.size()]; + quotaTable.batch(updates, results); + for (Object result : results) { + // A null result is an error condition (all RPC attempts failed) + if (!(result instanceof Result)) { + failures.add(result); + } + } + // Propagate a failure if any updates failed + if (!failures.isEmpty()) { + throw new IOException("Failed to write some snapshot size updates: " + failures); + } + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return; + } + } + + /** + * Fetches the current size of all snapshots in the given {@code namespace}. + * + * @param quotaTable The HBase quota table + * @param namespace Namespace to fetch the sum of snapshot sizes for + * @return The size of all snapshot sizes for the namespace in bytes. + */ + long getPreviousNamespaceSnapshotSize(Table quotaTable, String namespace) throws IOException { + // Update the size of each snapshot for all snapshots in a namespace. + Result r = quotaTable.get( + QuotaTableUtil.createGetNamespaceSnapshotSize(namespace)); + return getSnapshotSizeFromResult(r); + } + + /** + * Extracts the size component from a serialized {@link SpaceQuotaSnapshot} protobuf. + * + * @param r A Result containing one cell with a SpaceQuotaSnapshot protobuf + * @return The size in bytes of the snapshot. + */ + long getSnapshotSizeFromResult(Result r) throws InvalidProtocolBufferException { + // Per javadoc, Result should only be null if an exception was thrown. So, if we're here, + // we should be non-null. If we can't advance to the first cell, same as "no cell". 
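A standalone model of why persistSnapshotSizeChanges(..) wraps its work in synchronized(getLockForNamespace(..)): the per-snapshot and per-namespace values are read-modify-write updates to serialized cells, so updates within one namespace must be applied one at a time, while different namespaces may proceed in parallel. The quota table is replaced below by an in-memory map purely for illustration.

import java.util.HashMap;
import java.util.Map;

public class NamespaceLocks {
  private static final Map<String, Object> NAMESPACE_LOCKS = new HashMap<>();
  private static final Map<String, Long> NAMESPACE_SNAPSHOT_SIZES = new HashMap<>();

  // Same interning trick as the patch: one lock object per namespace, created on demand
  static synchronized Object getLockForNamespace(String namespace) {
    return NAMESPACE_LOCKS.computeIfAbsent(namespace, (ns) -> new Object());
  }

  static void addToNamespaceSize(String namespace, long delta) {
    synchronized (getLockForNamespace(namespace)) {
      long previous = NAMESPACE_SNAPSHOT_SIZES.getOrDefault(namespace, 0L);
      NAMESPACE_SNAPSHOT_SIZES.put(namespace, previous + delta); // read-modify-write under the lock
    }
  }

  public static void main(String[] args) {
    addToNamespaceSize("ns1", 1024L);
    addToNamespaceSize("ns1", 2048L);
    System.out.println(NAMESPACE_SNAPSHOT_SIZES); // {ns1=3072}
  }
}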
+ if (!r.isEmpty() && r.advance()) { + return QuotaTableUtil.parseSnapshotSize(r.current()); + } + return 0L; + } + + @Override + public long computeAndStoreSnapshotSizes( + Collection currentSnapshots) throws IOException { + writeLock.lock(); + try { + // Record what the current snapshots are + this.currentSnapshots = new ArrayList<>(currentSnapshots); + Collections.sort(this.currentSnapshots); + + // compute new size for table + snapshots for that table + List snapshotSizes = computeSnapshotSizes(this.currentSnapshots); + if (LOG.isTraceEnabled()) { + LOG.trace("Computed snapshot sizes for " + tn + " of " + snapshotSizes); + } + + // Compute the total size of all snapshots against our table + final long totalSnapshotSize = snapshotSizes.stream().mapToLong((sws) -> sws.getSize()).sum(); + + // Persist the size of each snapshot + try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { + persistSnapshotSizes(quotaTable, snapshotSizes); + } + + // Report the last time we did a recomputation + lastFullCompute = System.nanoTime(); + + return totalSnapshotSize; + } finally { + writeLock.unlock(); + } + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("tableName=").append(tn).append(", currentSnapshots="); + sb.append(currentSnapshots).append(", lastFullCompute=").append(lastFullCompute); + return sb.append("]").toString(); + } + + /** + * Computes the size of each snapshot against the table referenced by {@code this}. + * + * @param snapshots A sorted list of snapshots against {@code tn}. + * @return A list of the size for each snapshot against {@code tn}. + */ + List computeSnapshotSizes(List snapshots) throws IOException { + final List snapshotSizes = new ArrayList<>(snapshots.size()); + final Path rootDir = FSUtils.getRootDir(conf); + + // Get the map of store file names to store file path for this table + final Set tableReferencedStoreFiles; + try { + tableReferencedStoreFiles = FSUtils.getTableStoreFilePathMap(fs, rootDir).keySet(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } + + if (LOG.isTraceEnabled()) { + LOG.trace("Paths for " + tn + ": " + tableReferencedStoreFiles); + } + + // For each snapshot on this table, get the files which the snapshot references which + // the table does not. + Set snapshotReferencedFiles = new HashSet<>(); + for (String snapshotName : snapshots) { + Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); + SnapshotDescription sd = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd); + + if (LOG.isTraceEnabled()) { + LOG.trace("Files referenced by other snapshots: " + snapshotReferencedFiles); + } + + // Get the set of files from the manifest that this snapshot references which are not also + // referenced by the originating table. 
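The split between addArchivedFiles(..) and computeAndStoreSnapshotSizes(..) rests on the read/write lock: incremental archival reports share the read lock, a full recomputation takes the write lock and publishes a new lastFullCompute only after persisting. A minimal standalone sketch of that scheme (class and method names here are placeholders, not the patch's API):

import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.LongSupplier;

public class NotifierLocking {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private volatile long lastFullCompute = Long.MIN_VALUE;

  void onFilesArchived(Runnable applyIncrement) {
    lock.readLock().lock(); // archival reports may run concurrently with each other
    try {
      applyIncrement.run();
    } finally {
      lock.readLock().unlock();
    }
  }

  long recomputeAll(LongSupplier computeTotalSize) {
    lock.writeLock().lock(); // exclusive: no increments are applied mid-recomputation
    try {
      long total = computeTotalSize.getAsLong();
      lastFullCompute = System.nanoTime(); // published only after the new sizes are computed
      return total;
    } finally {
      lock.writeLock().unlock();
    }
  }

  public static void main(String[] args) {
    NotifierLocking n = new NotifierLocking();
    n.onFilesArchived(() -> System.out.println("applied incremental delta"));
    System.out.println("full recompute returned " + n.recomputeAll(() -> 4096L));
  }
}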
+ Set unreferencedStoreFileNames = getStoreFilesFromSnapshot( + manifest, (sfn) -> !tableReferencedStoreFiles.contains(sfn) + && !snapshotReferencedFiles.contains(sfn)); + + if (LOG.isTraceEnabled()) { + LOG.trace("Snapshot " + snapshotName + " solely references the files: " + + unreferencedStoreFileNames); + } + + // Compute the size of the store files for this snapshot + long size = getSizeOfStoreFiles(tn, unreferencedStoreFileNames); + if (LOG.isTraceEnabled()) { + LOG.trace("Computed size of " + snapshotName + " to be " + size); + } + + // Persist this snapshot's size into the map + snapshotSizes.add(new SnapshotWithSize(snapshotName, size)); + + // Make sure that we don't double-count the same file + for (StoreFileReference ref : unreferencedStoreFileNames) { + for (String fileNames : ref.getFamilyToFilesMapping().values()) { + snapshotReferencedFiles.add(fileNames); + } + } + } + + return snapshotSizes; + } + + /** + * Computes the size of each store file in {@code storeFileNames} + */ + long getSizeOfStoreFiles(TableName tn, Set storeFileNames) { + return storeFileNames.stream() + .collect(Collectors.summingLong((sfr) -> getSizeOfStoreFile(tn, sfr))); + } + + /** + * Computes the size of the store files for a single region. + */ + long getSizeOfStoreFile(TableName tn, StoreFileReference storeFileName) { + String regionName = storeFileName.getRegionName(); + return storeFileName.getFamilyToFilesMapping() + .entries().stream() + .collect(Collectors.summingLong((e) -> + getSizeOfStoreFile(tn, regionName, e.getKey(), e.getValue()))); + } + + /** + * Computes the size of the store file given its name, region and family name in + * the archive directory. + */ + long getSizeOfStoreFile( + TableName tn, String regionName, String family, String storeFile) { + Path familyArchivePath; + try { + familyArchivePath = HFileArchiveUtil.getStoreArchivePath(conf, tn, regionName, family); + } catch (IOException e) { + LOG.warn("Could not compute path for the archive directory for the region", e); + return 0L; + } + Path fileArchivePath = new Path(familyArchivePath, storeFile); + try { + if (fs.exists(fileArchivePath)) { + FileStatus[] status = fs.listStatus(fileArchivePath); + if (1 != status.length) { + LOG.warn("Expected " + fileArchivePath + + " to be a file but was a directory, ignoring reference"); + return 0L; + } + return status[0].getLen(); + } + } catch (IOException e) { + LOG.warn("Could not obtain the status of " + fileArchivePath, e); + return 0L; + } + LOG.warn("Expected " + fileArchivePath + " to exist but does not, ignoring reference."); + return 0L; + } + + /** + * Extracts the names of the store files referenced by this snapshot which satisfy the given + * predicate (the predicate returns {@code true}). + */ + Set getStoreFilesFromSnapshot( + SnapshotManifest manifest, Predicate filter) { + Set references = new HashSet<>(); + // For each region referenced by the snapshot + for (SnapshotRegionManifest rm : manifest.getRegionManifests()) { + StoreFileReference regionReference = new StoreFileReference( + HRegionInfo.convert(rm.getRegionInfo()).getEncodedName()); + + // For each column family in this region + for (FamilyFiles ff : rm.getFamilyFilesList()) { + final String familyName = ff.getFamilyName().toStringUtf8(); + // And each store file in that family + for (StoreFile sf : ff.getStoreFilesList()) { + String storeFileName = sf.getName(); + // A snapshot only "inherits" a files size if it uniquely refers to it (no table + // and no other snapshot references it). 
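A standalone sketch of the predicate wired into getStoreFilesFromSnapshot(..): a store file only counts toward the current snapshot if the live table no longer references it and no previously processed snapshot has already claimed it. Because snapshotReferencedFiles grows as snapshots are processed in order, each archived file is attributed at most once.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;

public class UniqueReferenceFilter {
  public static void main(String[] args) {
    Set<String> tableReferencedStoreFiles = new HashSet<>(Arrays.asList("live1"));
    Set<String> snapshotReferencedFiles = new HashSet<>();

    Predicate<String> countsForThisSnapshot = (sfn) ->
        !tableReferencedStoreFiles.contains(sfn) && !snapshotReferencedFiles.contains(sfn);

    System.out.println(countsForThisSnapshot.test("live1"));     // false: still owned by the table
    System.out.println(countsForThisSnapshot.test("archived1")); // true: this snapshot claims it
    snapshotReferencedFiles.add("archived1");                    // record the claim
    System.out.println(countsForThisSnapshot.test("archived1")); // false: already claimed
  }
}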
+ if (filter.test(storeFileName)) { + regionReference.addFamilyStoreFile(familyName, storeFileName); + } + } + } + // Only add this Region reference if we retained any files. + if (!regionReference.getFamilyToFilesMapping().isEmpty()) { + references.add(regionReference); + } + } + return references; + } + + /** + * Writes the snapshot sizes to the provided {@code table}. + */ + void persistSnapshotSizes( + Table table, List snapshotSizes) throws IOException { + // Convert each entry in the map to a Put and write them to the quota table + table.put(snapshotSizes + .stream() + .map(sws -> QuotaTableUtil.createPutForSnapshotSize( + tn, sws.getName(), sws.getSize())) + .collect(Collectors.toList())); + } + + /** + * A struct encapsulating the name of a snapshot and its "size" on the filesystem. This size is + * defined as the amount of filesystem space taken by the files the snapshot refers to which + * the originating table no longer refers to. + */ + static class SnapshotWithSize { + private final String name; + private final long size; + + SnapshotWithSize(String name, long size) { + this.name = Objects.requireNonNull(name); + this.size = size; + } + + String getName() { + return name; + } + + long getSize() { + return size; + } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(name).append(size).toHashCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (!(o instanceof SnapshotWithSize)) { + return false; + } + + SnapshotWithSize other = (SnapshotWithSize) o; + return name.equals(other.name) && size == other.size; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(32); + return sb.append("SnapshotWithSize:[").append(name).append(" ") + .append(StringUtils.byteDesc(size)).append("]").toString(); + } + } + + /** + * A reference to a collection of files in the archive directory for a single region. 
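SnapshotWithSize above is a plain name/size pair; a small standalone sketch of how computeAndStoreSnapshotSizes(..) reduces a list of them to the single per-table total. A local stand-in class is used because SnapshotWithSize is package-private in the patch.

import java.util.Arrays;
import java.util.List;

public class SumSnapshotSizes {
  // Local stand-in for the package-private SnapshotWithSize value class
  static final class NamedSize {
    final String name;
    final long size;
    NamedSize(String name, long size) { this.name = name; this.size = size; }
    long getSize() { return size; }
  }

  public static void main(String[] args) {
    List<NamedSize> snapshotSizes = Arrays.asList(
        new NamedSize("ss1", 1024L), new NamedSize("ss2", 4096L));
    // Same reduction as the patch: per-snapshot sizes -> one total for the table
    long totalSnapshotSize = snapshotSizes.stream().mapToLong(NamedSize::getSize).sum();
    System.out.println(totalSnapshotSize); // 5120
  }
}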
+ */ + static class StoreFileReference { + private final String regionName; + private final Multimap familyToFiles; + + StoreFileReference(String regionName) { + this.regionName = Objects.requireNonNull(regionName); + familyToFiles = HashMultimap.create(); + } + + String getRegionName() { + return regionName; + } + + Multimap getFamilyToFilesMapping() { + return familyToFiles; + } + + void addFamilyStoreFile(String family, String storeFileName) { + familyToFiles.put(family, storeFileName); + } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(regionName).append(familyToFiles).toHashCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof StoreFileReference)) { + return false; + } + StoreFileReference other = (StoreFileReference) o; + return regionName.equals(other.regionName) && familyToFiles.equals(other.familyToFiles); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + return sb.append("StoreFileReference[region=").append(regionName).append(", files=") + .append(familyToFiles).append("]").toString(); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java index 8f6a33aa70..3664eb4908 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java @@ -24,12 +24,15 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.Map; +import java.util.Set; import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.MetaTableAccessor; @@ -38,6 +41,7 @@ import org.apache.hadoop.hbase.RegionStateListener; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.namespace.NamespaceAuditor; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -49,9 +53,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.HashMultimap; + +import jersey.repackaged.com.google.common.collect.Maps; /** * Master Quota Manager. 
@@ -638,5 +647,26 @@ public class MasterQuotaManager implements RegionStateListener { } return numEntriesRemoved; } + + public void processFileArchivals(FileArchiveNotificationRequest request, Connection conn, + Configuration conf, FileSystem fs) throws IOException { + final HashMultimap> archivedFilesByTable = HashMultimap.create(); + // Group the archived files by table + for (FileWithSize fileWithSize : request.getArchivedFilesList()) { + TableName tn = ProtobufUtil.toTableName(fileWithSize.getTableName()); + archivedFilesByTable.put( + tn, Maps.immutableEntry(fileWithSize.getName(), fileWithSize.getSize())); + } + if (LOG.isTraceEnabled()) { + LOG.trace("Grouped archived files by table: " + archivedFilesByTable); + } + // Report each set of files to the appropriate object + for (TableName tn : archivedFilesByTable.keySet()) { + final Set> filesWithSize = archivedFilesByTable.get(tn); + final FileArchiverNotifier notifier = FileArchiverNotifierFactoryImpl.getInstance().get( + conn, conf, fs, tn); + notifier.addArchivedFiles(filesWithSize); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java index 64491b78c6..404337baaa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java @@ -17,6 +17,7 @@ package org.apache.hadoop.hbase.quotas; import java.io.IOException; +import java.util.Collection; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -31,6 +32,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos; import com.google.common.annotations.VisibleForTesting; @@ -234,6 +239,34 @@ public class RegionServerSpaceQuotaManager { } /** + * Builds the protobuf message to inform the Master of files being archived. + * + * @param tn The table the files previously belonged to. + * @param archivedFiles The files and their size in bytes that were archived. 
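A standalone sketch of the grouping processFileArchivals(..) performs: one RPC may carry files from several tables, so the Master buckets (file, size) pairs per table before handing each bucket to that table's FileArchiverNotifier. Guava's HashMultimap is modelled with a plain Map of lists, and "table:file:size" strings stand in for the protobuf FileWithSize messages; both are simplifications for illustration.

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

public class GroupArchivalsByTable {
  public static void main(String[] args) {
    String[] archivedFiles = {"t1:hfileA:1024", "t2:hfileB:2048", "t1:hfileC:512"};
    Map<String, List<Entry<String, Long>>> archivedFilesByTable = new HashMap<>();
    for (String fileWithSize : archivedFiles) {
      String[] parts = fileWithSize.split(":");
      archivedFilesByTable
          .computeIfAbsent(parts[0], (tn) -> new ArrayList<>())
          .add(new SimpleImmutableEntry<>(parts[1], Long.parseLong(parts[2])));
    }
    // In the real code each bucket is passed to the notifier obtained from
    // FileArchiverNotifierFactoryImpl.getInstance().get(conn, conf, fs, tn)
    System.out.println(archivedFilesByTable);
  }
}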
+ * @return The protobuf representation + */ + public RegionServerStatusProtos.FileArchiveNotificationRequest buildFileArchiveRequest( + TableName tn, Collection> archivedFiles) { + RegionServerStatusProtos.FileArchiveNotificationRequest.Builder builder = + RegionServerStatusProtos.FileArchiveNotificationRequest.newBuilder(); + HBaseProtos.TableName protoTn = ProtobufUtil.toProtoTableName(tn); + for (Entry archivedFile : archivedFiles) { + RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize fws = + RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.newBuilder() + .setName(archivedFile.getKey()) + .setSize(archivedFile.getValue()) + .setTableName(protoTn) + .build(); + builder.addArchivedFiles(fws); + } + final RegionServerStatusProtos.FileArchiveNotificationRequest request = builder.build(); + if (LOG.isTraceEnabled()) { + LOG.trace("Reporting file archival to Master: " + TextFormat.shortDebugString(request)); + } + return request; + } + + /** * Returns the collection of tables which have quota violation policies enforced on * this RegionServer. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java index 46f5a64286..7c7d5b3ea2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java @@ -17,28 +17,20 @@ package org.apache.hadoop.hbase.quotas; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; -import java.util.function.Predicate; import java.util.stream.Collectors; -import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; @@ -48,15 +40,6 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MetricsMaster; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile; -import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.snapshot.SnapshotManifest; -import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.HFileArchiveUtil; -import org.apache.hadoop.util.StringUtils; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; @@ -128,11 +111,11 @@ public class SnapshotQuotaObserverChore extends 
ScheduledChore { } // For each table, compute the size of each snapshot - Multimap snapshotsWithSize = computeSnapshotSizes( - snapshotsToComputeSize); + Map namespaceSnapshotSizes = computeSnapshotSizes(snapshotsToComputeSize); - // Write the size data to the quota table. - persistSnapshotSizes(snapshotsWithSize); + // Write the size data by namespaces to the quota table. + // We need to do this "globally" since each FileArchiverNotifier is limited to its own Table. + persistSnapshotSizesForNamespaces(namespaceSnapshotSizes); } /** @@ -184,325 +167,55 @@ public class SnapshotQuotaObserverChore extends ScheduledChore { /** * Computes the size of each snapshot provided given the current files referenced by the table. + * The size of each individual snapshot is recorded to the quota table as a part of this method. * * @param snapshotsToComputeSize The snapshots to compute the size of - * @return A mapping of table to snapshot created from that table and the snapshot's size. + * @return A mapping of snapshot sizes for each namespace */ - Multimap computeSnapshotSizes( + Map computeSnapshotSizes( Multimap snapshotsToComputeSize) throws IOException { - Multimap snapshotSizes = HashMultimap.create(); + final Map snapshotSizesByNamespace = new HashMap<>(); + final long start = System.nanoTime(); for (Entry> entry : snapshotsToComputeSize.asMap().entrySet()) { final TableName tn = entry.getKey(); - final List snapshotNames = new ArrayList<>(entry.getValue()); - // Sort the snapshots so we process them in lexicographic order. This ensures that multiple - // invocations of this Chore do not more the size ownership of some files between snapshots - // that reference the file (prevents size ownership from moving between snapshots). - Collections.sort(snapshotNames); - final Path rootDir = FSUtils.getRootDir(conf); - // Get the map of store file names to store file path for this table - // TODO is the store-file name unique enough? Does this need to be region+family+storefile? - final Set tableReferencedStoreFiles; - try { - tableReferencedStoreFiles = FSUtils.getTableStoreFilePathMap(fs, rootDir).keySet(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - return null; - } - - if (LOG.isTraceEnabled()) { - LOG.trace("Paths for " + tn + ": " + tableReferencedStoreFiles); - } - - // For each snapshot on this table, get the files which the snapshot references which - // the table does not. - Set snapshotReferencedFiles = new HashSet<>(); - for (String snapshotName : snapshotNames) { - final long start = System.nanoTime(); - Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); - SnapshotDescription sd = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); - SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd); - - if (LOG.isTraceEnabled()) { - LOG.trace("Files referenced by other snapshots: " + snapshotReferencedFiles); - } - - // Get the set of files from the manifest that this snapshot references which are not also - // referenced by the originating table. 
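A standalone sketch of the roll-up the rewritten chore performs: the notifier returns one total per table, and the chore folds those totals into a per-namespace sum before writing a single namespace record to the quota table. The real code uses TableName.getNamespaceAsString(); parsing a "namespace:table" string below is a simplification.

import java.util.HashMap;
import java.util.Map;

public class NamespaceRollup {
  public static void main(String[] args) {
    // table -> total snapshot size, as returned by computeAndStoreSnapshotSizes(..)
    Map<String, Long> totalsByTable = new HashMap<>();
    totalsByTable.put("ns1:tableA", 4096L);
    totalsByTable.put("ns1:tableB", 1024L);
    totalsByTable.put("tableC", 2048L);

    Map<String, Long> snapshotSizesByNamespace = new HashMap<>();
    for (Map.Entry<String, Long> e : totalsByTable.entrySet()) {
      String key = e.getKey();
      String namespace = key.contains(":") ? key.substring(0, key.indexOf(':')) : "default";
      snapshotSizesByNamespace.merge(namespace, e.getValue(), Long::sum);
    }
    System.out.println(snapshotSizesByNamespace); // ns1=5120, default=2048 (order may vary)
  }
}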
- Set unreferencedStoreFileNames = getStoreFilesFromSnapshot( - manifest, (sfn) -> !tableReferencedStoreFiles.contains(sfn) - && !snapshotReferencedFiles.contains(sfn)); - - if (LOG.isTraceEnabled()) { - LOG.trace("Snapshot " + snapshotName + " solely references the files: " - + unreferencedStoreFileNames); - } - - // Compute the size of the store files for this snapshot - long size = getSizeOfStoreFiles(tn, unreferencedStoreFileNames); - if (LOG.isTraceEnabled()) { - LOG.trace("Computed size of " + snapshotName + " to be " + size); - } + final Collection snapshotNames = entry.getValue(); - // Persist this snapshot's size into the map - snapshotSizes.put(tn, new SnapshotWithSize(snapshotName, size)); + // Get our notifier instance, this is tracking archivals that happen out-of-band of this chore + FileArchiverNotifier notifier = getNotifierForTable(tn); - // Make sure that we don't double-count the same file - for (StoreFileReference ref : unreferencedStoreFileNames) { - for (String fileName : ref.getFamilyToFilesMapping().values()) { - snapshotReferencedFiles.add(fileName); - } - } - // Update the amount of time it took to compute the snapshot's size - if (null != metrics) { - metrics.incrementSnapshotSizeComputationTime((System.nanoTime() - start) / 1_000_000); - } - } + // The total size consumed by all snapshots against this table + long totalSnapshotSize = notifier.computeAndStoreSnapshotSizes(snapshotNames); + // Bucket that size into the appropriate namespace + snapshotSizesByNamespace.merge(tn.getNamespaceAsString(), totalSnapshotSize, Long::sum); } - return snapshotSizes; - } - - /** - * Extracts the names of the store files referenced by this snapshot which satisfy the given - * predicate (the predicate returns {@code true}). - */ - Set getStoreFilesFromSnapshot( - SnapshotManifest manifest, Predicate filter) { - Set references = new HashSet<>(); - // For each region referenced by the snapshot - for (SnapshotRegionManifest rm : manifest.getRegionManifests()) { - StoreFileReference regionReference = new StoreFileReference( - HRegionInfo.convert(rm.getRegionInfo()).getEncodedName()); - // For each column family in this region - for (FamilyFiles ff : rm.getFamilyFilesList()) { - final String familyName = ff.getFamilyName().toStringUtf8(); - // And each store file in that family - for (StoreFile sf : ff.getStoreFilesList()) { - String storeFileName = sf.getName(); - // A snapshot only "inherits" a files size if it uniquely refers to it (no table - // and no other snapshot references it). - if (filter.test(storeFileName)) { - regionReference.addFamilyStoreFile(familyName, storeFileName); - } - } - } - // Only add this Region reference if we retained any files. - if (!regionReference.getFamilyToFilesMapping().isEmpty()) { - references.add(regionReference); - } + // Update the amount of time it took to compute the size of the snapshots for a table + if (null != metrics) { + metrics.incrementSnapshotSizeComputationTime((System.nanoTime() - start) / 1_000_000); } - return references; - } - /** - * Calculates the directory in HDFS for a table based on the configuration. 
- */ - Path getTableDir(TableName tn) throws IOException { - Path rootDir = FSUtils.getRootDir(conf); - return FSUtils.getTableDir(rootDir, tn); - } - - /** - * Computes the size of each store file in {@code storeFileNames} - */ - long getSizeOfStoreFiles(TableName tn, Set storeFileNames) { - return storeFileNames.stream() - .collect(Collectors.summingLong((sfr) -> getSizeOfStoreFile(tn, sfr))); + return snapshotSizesByNamespace; } /** - * Computes the size of the store files for a single region. - */ - long getSizeOfStoreFile(TableName tn, StoreFileReference storeFileName) { - String regionName = storeFileName.getRegionName(); - return storeFileName.getFamilyToFilesMapping() - .entries().stream() - .collect(Collectors.summingLong((e) -> - getSizeOfStoreFile(tn, regionName, e.getKey(), e.getValue()))); - } - - /** - * Computes the size of the store file given its name, region and family name in - * the archive directory. - */ - long getSizeOfStoreFile( - TableName tn, String regionName, String family, String storeFile) { - Path familyArchivePath; - try { - familyArchivePath = HFileArchiveUtil.getStoreArchivePath(conf, tn, regionName, family); - } catch (IOException e) { - LOG.warn("Could not compute path for the archive directory for the region", e); - return 0L; - } - Path fileArchivePath = new Path(familyArchivePath, storeFile); - try { - if (fs.exists(fileArchivePath)) { - FileStatus[] status = fs.listStatus(fileArchivePath); - if (1 != status.length) { - LOG.warn("Expected " + fileArchivePath + - " to be a file but was a directory, ignoring reference"); - return 0L; - } - return status[0].getLen(); - } - } catch (IOException e) { - LOG.warn("Could not obtain the status of " + fileArchivePath, e); - return 0L; - } - LOG.warn("Expected " + fileArchivePath + " to exist but does not, ignoring reference."); - return 0L; - } - - /** - * Writes the snapshot sizes to the {@code hbase:quota} table. + * Returns the correct instance of {@link FileArchiverNotifier} for the given table name. * - * @param snapshotsWithSize The snapshot sizes to write. - */ - void persistSnapshotSizes( - Multimap snapshotsWithSize) throws IOException { - try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { - // Write each snapshot size for the table - persistSnapshotSizes(quotaTable, snapshotsWithSize); - // Write a size entry for all snapshots in a namespace - persistSnapshotSizesByNS(quotaTable, snapshotsWithSize); - } - } - - /** - * Writes the snapshot sizes to the provided {@code table}. - */ - void persistSnapshotSizes( - Table table, Multimap snapshotsWithSize) throws IOException { - // Convert each entry in the map to a Put and write them to the quota table - table.put(snapshotsWithSize.entries() - .stream() - .map(e -> QuotaTableUtil.createPutForSnapshotSize( - e.getKey(), e.getValue().getName(), e.getValue().getSize())) - .collect(Collectors.toList())); - } - - /** - * Rolls up the snapshot sizes by namespace and writes a single record for each namespace - * which is the size of all snapshots in that namespace. - */ - void persistSnapshotSizesByNS( - Table quotaTable, Multimap snapshotsWithSize) throws IOException { - Map namespaceSnapshotSizes = groupSnapshotSizesByNamespace(snapshotsWithSize); - quotaTable.put(namespaceSnapshotSizes.entrySet().stream() - .map(e -> QuotaTableUtil.createPutForNamespaceSnapshotSize( - e.getKey(), e.getValue())) - .collect(Collectors.toList())); - } - - /** - * Sums the snapshot sizes for each namespace. 
+ * @param tn The table name + * @return A {@link FileArchiverNotifier} instance */ - Map groupSnapshotSizesByNamespace( - Multimap snapshotsWithSize) { - return snapshotsWithSize.entries().stream() - .collect(Collectors.groupingBy( - // Convert TableName into the namespace string - (e) -> e.getKey().getNamespaceAsString(), - // Sum the values for namespace - Collectors.mapping( - Map.Entry::getValue, Collectors.summingLong((sws) -> sws.getSize())))); + FileArchiverNotifier getNotifierForTable(TableName tn) { + return FileArchiverNotifierFactoryImpl.getInstance().get(conn, conf, fs, tn); } /** - * A struct encapsulating the name of a snapshot and its "size" on the filesystem. This size is - * defined as the amount of filesystem space taken by the files the snapshot refers to which - * the originating table no longer refers to. + * Writes the size used by snapshots for each namespace to the quota table. */ - static class SnapshotWithSize { - private final String name; - private final long size; - - SnapshotWithSize(String name, long size) { - this.name = Objects.requireNonNull(name); - this.size = size; - } - - String getName() { - return name; - } - - long getSize() { - return size; - } - - @Override - public int hashCode() { - return new HashCodeBuilder().append(name).append(size).toHashCode(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (!(o instanceof SnapshotWithSize)) { - return false; - } - - SnapshotWithSize other = (SnapshotWithSize) o; - return name.equals(other.name) && size == other.size; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(32); - return sb.append("SnapshotWithSize:[").append(name).append(" ") - .append(StringUtils.byteDesc(size)).append("]").toString(); - } - } - - /** - * A reference to a collection of files in the archive directory for a single region. 
- */ - static class StoreFileReference { - private final String regionName; - private final Multimap familyToFiles; - - StoreFileReference(String regionName) { - this.regionName = Objects.requireNonNull(regionName); - familyToFiles = HashMultimap.create(); - } - - String getRegionName() { - return regionName; - } - - Multimap getFamilyToFilesMapping() { - return familyToFiles; - } - - void addFamilyStoreFile(String family, String storeFileName) { - familyToFiles.put(family, storeFileName); - } - - @Override - public int hashCode() { - return new HashCodeBuilder().append(regionName).append(familyToFiles).toHashCode(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof StoreFileReference)) { - return false; - } - StoreFileReference other = (StoreFileReference) o; - return regionName.equals(other.regionName) && familyToFiles.equals(other.familyToFiles); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - return sb.append("StoreFileReference[region=").append(regionName).append(", files=") - .append(familyToFiles).append("]").toString(); + void persistSnapshotSizesForNamespaces( + Map snapshotSizesByNamespace) throws IOException { + try (Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME)) { + quotaTable.put(snapshotSizesByNamespace.entrySet().stream() + .map(e -> QuotaTableUtil.createPutForNamespaceSnapshotSize(e.getKey(), e.getValue())) + .collect(Collectors.toList())); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index af26c38a0b..f81588c2d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -157,6 +157,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpeci import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; @@ -3743,4 +3744,38 @@ public class HRegionServer extends HasThread implements public RegionServerSpaceQuotaManager getRegionServerSpaceQuotaManager() { return this.rsSpaceQuotaManager; } + + @Override + public boolean reportFileArchivalForQuotas(TableName tableName, + Collection> archivedFiles) { + RegionServerStatusService.BlockingInterface rss = rssStub; + if (rss == null) { + // the current server could be stopping. 
+ LOG.trace("Skipping file archival reporting to HMaster as stub is null"); + return false; + } + try { + RegionServerStatusProtos.FileArchiveNotificationRequest request = + rsSpaceQuotaManager.buildFileArchiveRequest(tableName, archivedFiles); + rss.reportFileArchival(null, request); + } catch (ServiceException se) { + IOException ioe = ProtobufUtil.getRemoteException(se); + if (ioe instanceof PleaseHoldException) { + if (LOG.isTraceEnabled()) { + LOG.trace("Failed to report file archival(s) to Master because it is initializing." + + " This will be retried.", ioe); + } + // The Master is coming up. Will retry the report later. Avoid re-creating the stub. + return false; + } + if (rssStub == rss) { + rssStub = null; + } + // re-create the stub if we failed to report the archival + createRegionServerStatusStub(true); + LOG.debug("Failed to report file archival(s) to Master. This will be retried.", ioe); + return false; + } + return true; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 42379e6539..6231722040 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -28,6 +28,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map.Entry; import java.util.NavigableSet; import java.util.Set; import java.util.concurrent.Callable; @@ -98,6 +99,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableCollection; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import com.google.common.collect.Sets; /** @@ -2513,6 +2515,7 @@ public class HStore implements Store { private void removeCompactedfiles(Collection compactedfiles) throws IOException { final List filesToRemove = new ArrayList<>(compactedfiles.size()); + final List storeFileSizes = new ArrayList<>(compactedfiles.size()); for (final StoreFile file : compactedfiles) { synchronized (file) { try { @@ -2521,7 +2524,17 @@ public class HStore implements Store { if (LOG.isDebugEnabled()) { LOG.debug("The file " + file + " was closed but still not archived."); } + // Temporarily re-open the reader so we can get the size of the file + long length = 0; + try { + file.initReader(); + length = file.getReader().length(); + } finally { + file.closeReader( + file.getCacheConf() != null ? file.getCacheConf().shouldEvictOnClose() : true); + } filesToRemove.add(file); + storeFileSizes.add(length); continue; } if (file.isCompactedAway() && !file.isReferencedInReads()) { @@ -2530,9 +2543,13 @@ public class HStore implements Store { if (LOG.isTraceEnabled()) { LOG.trace("Closing and archiving the file " + file.getPath()); } + // Copy the file size before closing the reader + final long length = r.length(); r.close(true); // Just close and return filesToRemove.add(file); + // Only add the length if we successfully added the file to `filesToRemove` + storeFileSizes.add(length); } } catch (Exception e) { LOG.error( @@ -2557,9 +2574,12 @@ public class HStore implements Store { // FileNotFoundException when we attempt to re-archive them in the next go around. 
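A simplified standalone model of the error handling in reportFileArchivalForQuotas(..): the report is best-effort, a false return is not fatal because the periodic chore recomputes sizes from the filesystem anyway. The ServiceException/PleaseHoldException pair and stub re-creation are replaced with placeholder types here; only the control flow is the point.

public class BestEffortArchivalReport {
  interface MasterStub {
    void reportFileArchival(Object request) throws Exception;
  }

  static class MasterInitializingException extends Exception {}

  private volatile MasterStub rssStub;

  BestEffortArchivalReport(MasterStub stub) {
    this.rssStub = stub;
  }

  /** Returns true only if the Master accepted the report; false means "let the chore reconcile". */
  boolean reportFileArchivalForQuotas(Object request) {
    MasterStub rss = rssStub;
    if (rss == null) {
      return false; // server is stopping, or the stub has not been (re)created yet
    }
    try {
      rss.reportFileArchival(request);
      return true;
    } catch (MasterInitializingException e) {
      return false; // Master is coming up: keep the stub, a later report will succeed
    } catch (Exception e) {
      rssStub = null; // force the stub to be re-created before the next report
      return false;
    }
  }

  public static void main(String[] args) {
    BestEffortArchivalReport rs = new BestEffortArchivalReport(request -> {});
    System.out.println(rs.reportFileArchivalForQuotas("fake request")); // true
  }
}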
Collection failedFiles = fae.getFailedFiles(); Iterator iter = filesToRemove.iterator(); + Iterator sizeIter = storeFileSizes.iterator(); while (iter.hasNext()) { + sizeIter.next(); if (failedFiles.contains(iter.next().getPath())) { iter.remove(); + sizeIter.remove(); } } if (!filesToRemove.isEmpty()) { @@ -2572,6 +2592,8 @@ public class HStore implements Store { if (!filesToRemove.isEmpty()) { // Clear the compactedfiles from the store file manager clearCompactedfiles(filesToRemove); + // Try to send report of this archival to the Master for updating quota usage faster + reportArchivedFilesForQuota(filesToRemove, storeFileSizes); } } @@ -2595,4 +2617,31 @@ public class HStore implements Store { lock.writeLock().unlock(); } } + + void reportArchivedFilesForQuota(List archivedFiles, List fileSizes) { + // Sanity check from the caller + if (archivedFiles.size() != fileSizes.size()) { + throw new RuntimeException("Coding error: should never see lists of varying size"); + } + RegionServerServices rss = this.region.getRegionServerServices(); + if (rss == null) { + return; + } + List> filesWithSizes = new ArrayList<>(archivedFiles.size()); + Iterator fileSizeIter = fileSizes.iterator(); + for (StoreFile storeFile : archivedFiles) { + final long fileSize = fileSizeIter.next(); + if (storeFile.isHFile() && fileSize != 0) { + filesWithSizes.add(Maps.immutableEntry(storeFile.getPath().getName(), fileSize)); + } + } + if (LOG.isTraceEnabled()) { + LOG.trace("Files archived: " + archivedFiles + ", reporting the following to the Master: " + + filesWithSizes); + } + boolean success = rss.reportFileArchivalForQuotas(getTableName(), filesWithSizes); + if (LOG.isTraceEnabled() && !success) { + LOG.trace("Failed to report archival of files: " + filesWithSizes); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index f4a4a108aa..05be3503c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -19,8 +19,10 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.ConcurrentMap; @@ -288,4 +290,15 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi * @return {@code false} if reporting should be temporarily paused, {@code true} otherwise. */ boolean reportRegionSizesForQuotas(RegionSizeStore sizeStore); + + /** + * Reports a collection of files, and their sizes, that belonged to the given {@code table} were + * just moved to the archive directory. + * + * @param tableName The name of the table that files previously belonged to + * @param archivedFiles Files and their sizes that were moved to archive + * @return {@code true} if the files were successfully reported, {@code false} otherwise. 
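A standalone sketch of the parallel-list pruning in removeCompactedfiles(..) above: file sizes are captured before the readers are closed, and when some files fail to archive both lists must be trimmed in lock-step so the sizes later reported to the Master stay aligned with the files that were actually archived.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class ParallelListPruning {
  public static void main(String[] args) {
    List<String> filesToRemove = new ArrayList<>(Arrays.asList("hfile1", "hfile2", "hfile3"));
    List<Long> storeFileSizes = new ArrayList<>(Arrays.asList(10L, 20L, 30L));
    List<String> failedFiles = Arrays.asList("hfile2");

    Iterator<String> iter = filesToRemove.iterator();
    Iterator<Long> sizeIter = storeFileSizes.iterator();
    while (iter.hasNext()) {
      sizeIter.next(); // advance both iterators together to keep indices aligned
      if (failedFiles.contains(iter.next())) {
        iter.remove();
        sizeIter.remove();
      }
    }
    System.out.println(filesToRemove + " " + storeFileSizes); // [hfile1, hfile3] [10, 30]
  }
}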
+ */ + boolean reportFileArchivalForQuotas( + TableName tableName, Collection> archivedFiles); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index 7bbf29d36b..8f677924f3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -19,10 +19,12 @@ package org.apache.hadoop.hbase; import java.io.IOException; import java.net.InetSocketAddress; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicBoolean; @@ -362,4 +364,10 @@ public class MockRegionServerServices implements RegionServerServices { public boolean reportRegionSizesForQuotas(RegionSizeStore sizeStore) { return true; } + + @Override + public boolean reportFileArchivalForQuotas( + TableName tableName, Collection> archivedFiles) { + return true; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index a542192425..dedca82401 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -20,10 +20,12 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Random; import java.util.Set; import java.util.TreeMap; @@ -746,4 +748,10 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { public boolean reportRegionSizesForQuotas(RegionSizeStore sizeStore) { return true; } + + @Override + public boolean reportFileArchivalForQuotas( + TableName tableName, Collection> archivedFiles) { + return true; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileArchiverNotifierImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileArchiverNotifierImpl.java new file mode 100644 index 0000000000..f63d38dc3e --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileArchiverNotifierImpl.java @@ -0,0 +1,307 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.quotas; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.SnapshotDescription; +import org.apache.hadoop.hbase.client.SnapshotType; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.quotas.FileArchiverNotifierImpl.SnapshotWithSize; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.FSUtils; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; +import com.google.common.collect.Maps; + +/** + * Test class for {@link FileArchiverNotifierImpl}. 
+ */ +@Category(MediumTests.class) +public class TestFileArchiverNotifierImpl { + private static final Log LOG = LogFactory.getLog(TestFileArchiverNotifierImpl.class); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final AtomicLong COUNTER = new AtomicLong(); + + @Rule + public TestName testName = new TestName(); + + private Connection conn; + private Admin admin; + private SpaceQuotaHelperForTests helper; + private FileSystem fs; + private Configuration conf; + + @BeforeClass + public static void setUp() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + SpaceQuotaHelperForTests.updateConfigForQuotas(conf); + // Clean up the compacted files faster than normal (15s instead of 2mins) + conf.setInt("hbase.hfile.compaction.discharger.interval", 15 * 1000); + // Prevent the SnapshotQuotaObserverChore from running + conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_DELAY_KEY, 60 * 60 * 1000); + conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_PERIOD_KEY, 60 * 60 * 1000); + TEST_UTIL.startMiniCluster(1); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void setup() throws Exception { + conn = TEST_UTIL.getConnection(); + admin = TEST_UTIL.getAdmin(); + helper = new SpaceQuotaHelperForTests(TEST_UTIL, testName, COUNTER); + helper.removeAllQuotas(conn); + fs = TEST_UTIL.getTestFileSystem(); + conf = TEST_UTIL.getConfiguration(); + } + + @Test + public void testSnapshotSizePersistence() throws IOException { + final Admin admin = TEST_UTIL.getAdmin(); + final TableName tn = TableName.valueOf(testName.getMethodName()); + if (admin.tableExists(tn)) { + admin.disableTable(tn); + admin.deleteTable(tn); + } + HTableDescriptor desc = new HTableDescriptor(tn); + desc.addFamily(new HColumnDescriptor(QuotaTableUtil.QUOTA_FAMILY_USAGE)); + admin.createTable(desc); + + FileArchiverNotifierImpl notifier = new FileArchiverNotifierImpl(conn, conf, fs, tn); + List snapshotsWithSizes = new ArrayList<>(); + try (Table table = conn.getTable(tn)) { + // Writing no values will result in no records written. 
+ verify(table, () -> { + notifier.persistSnapshotSizes(table, snapshotsWithSizes); + assertEquals(0, count(table)); + }); + + verify(table, () -> { + snapshotsWithSizes.add(new SnapshotWithSize("ss1", 1024L)); + snapshotsWithSizes.add(new SnapshotWithSize("ss2", 4096L)); + notifier.persistSnapshotSizes(table, snapshotsWithSizes); + assertEquals(2, count(table)); + assertEquals(1024L, extractSnapshotSize(table, tn, "ss1")); + assertEquals(4096L, extractSnapshotSize(table, tn, "ss2")); + }); + } + } + + @Test + public void testIncrementalFileArchiving() throws Exception { + final Admin admin = TEST_UTIL.getAdmin(); + final TableName tn = TableName.valueOf(testName.getMethodName()); + if (admin.tableExists(tn)) { + admin.disableTable(tn); + admin.deleteTable(tn); + } + final Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME); + final TableName tn1 = helper.createTableWithRegions(1); + admin.setQuota(QuotaSettingsFactory.limitTableSpace( + tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS)); + + // Write some data and flush it + helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE); + admin.flush(tn1); + + // Create a snapshot on the table + final String snapshotName1 = tn1 + "snapshot1"; + admin.snapshot(new SnapshotDescription(snapshotName1, tn1, SnapshotType.SKIPFLUSH)); + + FileArchiverNotifierImpl notifier = new FileArchiverNotifierImpl(conn, conf, fs, tn); + long t1 = notifier.getLastFullCompute(); + long snapshotSize = notifier.computeAndStoreSnapshotSizes(Arrays.asList(snapshotName1)); + assertEquals("The size of the snapshots should be zero", 0, snapshotSize); + assertTrue("Last compute time was not less than current compute time", + t1 < notifier.getLastFullCompute()); + + // No recently archived files and the snapshot should have no size + assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1)); + + // Invoke the addArchivedFiles method with no files + notifier.addArchivedFiles(Collections.emptySet()); + + // The size should not have changed + assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1)); + + notifier.addArchivedFiles(ImmutableSet.of(entry("a", 1024L), entry("b", 1024L))); + + // The size should not have changed + assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1)); + + // Pull one file referenced by the snapshot out of the manifest + Set referencedFiles = getFilesReferencedBySnapshot(snapshotName1); + assertTrue("Found snapshot referenced files: " + referencedFiles, referencedFiles.size() >= 1); + String referencedFile = Iterables.getFirst(referencedFiles, null); + assertNotNull(referencedFile); + + // Report that a file this snapshot referenced was moved to the archive. This is a sign + // that the snapshot should now "own" the size of this file + final long fakeFileSize = 2048L; + notifier.addArchivedFiles(ImmutableSet.of(entry(referencedFile, fakeFileSize))); + + // Verify that the snapshot owns this file. + assertEquals(fakeFileSize, extractSnapshotSize(quotaTable, tn, snapshotName1)); + + // In reality, we did not actually move the file, so a "full" computation should re-set the + // size of the snapshot back to 0. 
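A tiny standalone model of the reconciliation this test asserts: an incremental archival bump is only an estimate, and the next full computation, which scans the real manifests and filesystem, overwrites it with the authoritative value. The class below is illustrative only and not part of the patch.

public class IncrementalThenFull {
  private long snapshotSize = 0L;

  void onArchivalReport(long delta) {
    snapshotSize += delta;            // fast path: may briefly over- or under-count
  }

  void onFullCompute(long authoritativeSize) {
    snapshotSize = authoritativeSize; // the periodic full computation wins
  }

  public static void main(String[] args) {
    IncrementalThenFull s = new IncrementalThenFull();
    s.onArchivalReport(2048L);          // the test's fake archival report
    System.out.println(s.snapshotSize); // 2048
    s.onFullCompute(0L);                // nothing was really archived
    System.out.println(s.snapshotSize); // 0
  }
}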
+ long t2 = notifier.getLastFullCompute(); + snapshotSize = notifier.computeAndStoreSnapshotSizes(Arrays.asList(snapshotName1)); + assertEquals(0, snapshotSize); + assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1)); + // We should also have no recently archived files after a re-computation + assertTrue("Last compute time was not less than current compute time", + t2 < notifier.getLastFullCompute()); + } + + @Test + public void testParseOldNamespaceSnapshotSize() throws Exception { + final Admin admin = TEST_UTIL.getAdmin(); + final TableName fakeQuotaTableName = TableName.valueOf(testName.getMethodName()); + final TableName tn = TableName.valueOf(testName.getMethodName() + "1"); + if (admin.tableExists(fakeQuotaTableName)) { + admin.disableTable(fakeQuotaTableName); + admin.deleteTable(fakeQuotaTableName); + } + HTableDescriptor desc = new HTableDescriptor(fakeQuotaTableName); + desc.addFamily(new HColumnDescriptor(QuotaUtil.QUOTA_FAMILY_USAGE)); + desc.addFamily(new HColumnDescriptor(QuotaUtil.QUOTA_FAMILY_INFO)); + admin.createTable(desc); + + final String ns = ""; + try (Table fakeQuotaTable = conn.getTable(fakeQuotaTableName)) { + FileArchiverNotifierImpl notifier = new FileArchiverNotifierImpl(conn, conf, fs, tn); + // Verify no record is treated as zero + assertEquals(0, notifier.getPreviousNamespaceSnapshotSize(fakeQuotaTable, ns)); + + // Set an explicit value of zero + fakeQuotaTable.put(QuotaTableUtil.createPutForNamespaceSnapshotSize(ns, 0L)); + assertEquals(0, notifier.getPreviousNamespaceSnapshotSize(fakeQuotaTable, ns)); + + // Set a non-zero value + fakeQuotaTable.put(QuotaTableUtil.createPutForNamespaceSnapshotSize(ns, 1024L)); + assertEquals(1024L, notifier.getPreviousNamespaceSnapshotSize(fakeQuotaTable, ns)); + } + } + + private long count(Table t) throws IOException { + try (ResultScanner rs = t.getScanner(new Scan())) { + long sum = 0; + for (Result r : rs) { + while (r.advance()) { + sum++; + } + } + return sum; + } + } + + private long extractSnapshotSize( + Table quotaTable, TableName tn, String snapshot) throws IOException { + Get g = QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot); + Result r = quotaTable.get(g); + assertNotNull(r); + CellScanner cs = r.cellScanner(); + assertTrue(cs.advance()); + Cell c = cs.current(); + assertNotNull(c); + return QuotaTableUtil.extractSnapshotSize( + c.getValueArray(), c.getValueOffset(), c.getValueLength()); + } + + private void verify(Table t, IOThrowingRunnable test) throws IOException { + admin.disableTable(t.getName()); + admin.truncateTable(t.getName(), false); + test.run(); + } + + @FunctionalInterface + private interface IOThrowingRunnable { + void run() throws IOException; + } + + private Set getFilesReferencedBySnapshot(String snapshotName) throws IOException { + HashSet files = new HashSet<>(); + Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir( + snapshotName, FSUtils.getRootDir(conf)); + SnapshotProtos.SnapshotDescription sd = SnapshotDescriptionUtils.readSnapshotInfo( + fs, snapshotDir); + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd); + // For each region referenced by the snapshot + for (SnapshotRegionManifest rm : manifest.getRegionManifests()) { + // For each column family in this region + for (FamilyFiles ff : rm.getFamilyFilesList()) { + // And each store file in that family + for (StoreFile sf : ff.getStoreFilesList()) { + files.add(sf.getName()); + } + } + } + return files; + } + + private Entry entry(K k, V v) { + return 
Maps.immutableEntry(k, v); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java index 1104c075db..4d6f74f6e9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java @@ -19,21 +19,30 @@ package org.apache.hadoop.hbase.quotas; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import java.util.Collections; +import java.util.List; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ClientServiceCallable; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RpcRetryingCaller; import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; +import org.apache.hadoop.hbase.client.SnapshotType; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.AfterClass; import org.junit.Before; @@ -62,8 +71,15 @@ public class TestLowLatencySpaceQuotas { Configuration conf = TEST_UTIL.getConfiguration(); // The default 1s period for QuotaObserverChore is good. SpaceQuotaHelperForTests.updateConfigForQuotas(conf); - // Set the period to read region size from HDFS to be very long + // Set the period/delay to read region size from HDFS to be very long conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 1000 * 120); + conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, 1000 * 120); + // Set the same long period/delay to compute snapshot sizes + conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_PERIOD_KEY, 1000 * 120); + conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_DELAY_KEY, 1000 * 120); + // Clean up the compacted files faster than normal (5s instead of 2mins) + conf.setInt("hbase.hfile.compaction.discharger.interval", 5 * 1000); + TEST_UTIL.startMiniCluster(1); } @@ -218,4 +234,69 @@ public class TestLowLatencySpaceQuotas { } }); } + + @Test + public void testSnapshotSizes() throws Exception { + TableName tn = helper.createTableWithRegions(1); + // Set a quota + QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( + tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); + admin.setQuota(settings); + + // Write some data and flush it to disk. 
+ final long sizePerBatch = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE; + helper.writeData(tn, sizePerBatch); + admin.flush(tn); + + final String snapshot1 = "snapshot1"; + admin.snapshot(snapshot1, tn, SnapshotType.SKIPFLUSH); + + // Compute the size of the file for the Region we'll send to archive + HRegion region = Iterables.getOnlyElement(TEST_UTIL.getHBaseCluster().getRegions(tn)); + List stores = region.getStores(); + long summer = 0; + for (Store store : stores) { + summer += store.getStorefilesSize(); + } + final long storeFileSize = summer; + + // Wait for the table to show the usage + TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) { + @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + return snapshot.getUsage() == storeFileSize; + } + }); + + // Spoof a "full" computation of snapshot size. Normally the chore handles this, but we want + // to test in the absence of this chore. + FileArchiverNotifier notifier = TEST_UTIL.getHBaseCluster().getMaster() + .getSnapshotQuotaObserverChore().getNotifierForTable(tn); + notifier.computeAndStoreSnapshotSizes(Collections.singletonList(snapshot1)); + + // Force a major compaction to create a new file and push the old file to the archive + TEST_UTIL.compact(tn, true); + + // After moving the old file to archive/, the space of this table should double + // We have a new file created by the majc referenced by the table and the snapshot still + // referencing the old file. + TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) { + @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + return snapshot.getUsage() >= 2 * storeFileSize; + } + }); + + try (Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME)) { + Result r = quotaTable.get(QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot1)); + assertTrue("Expected a non-null, non-empty Result", r != null && !r.isEmpty()); + assertTrue(r.advance()); + assertEquals("The snapshot's size should be the same as the origin store file", + storeFileSize, QuotaTableUtil.parseSnapshotSize(r.current())); + + r = quotaTable.get(QuotaTableUtil.createGetNamespaceSnapshotSize(tn.getNamespaceAsString())); + assertTrue("Expected a non-null, non-empty Result", r != null && !r.isEmpty()); + assertTrue(r.advance()); + assertEquals("The snapshot's size should be the same as the origin store file", + storeFileSize, QuotaTableUtil.parseSnapshotSize(r.current())); + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSnapshotQuotaObserverChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSnapshotQuotaObserverChore.java index 4022e3ffeb..a54ef84bb0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSnapshotQuotaObserverChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSnapshotQuotaObserverChore.java @@ -17,36 +17,36 @@ package org.apache.hadoop.hbase.quotas; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import 
org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.client.SnapshotType; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.SnapshotWithSize; import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.NoFilesToDischarge; import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -59,7 +59,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; import com.google.common.collect.HashMultimap; -import com.google.common.collect.Iterables; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.Multimap; /** @@ -107,50 +107,6 @@ public class TestSnapshotQuotaObserverChore { } @Test - public void testSnapshotSizePersistence() throws IOException { - final Admin admin = TEST_UTIL.getAdmin(); - final TableName tn = TableName.valueOf("quota_snapshotSizePersistence"); - if (admin.tableExists(tn)) { - admin.disableTable(tn); - admin.deleteTable(tn); - } - HTableDescriptor desc = new HTableDescriptor(tn); - desc.addFamily(new HColumnDescriptor(QuotaTableUtil.QUOTA_FAMILY_USAGE)); - admin.createTable(desc); - - Multimap snapshotsWithSizes = HashMultimap.create(); - try (Table table = conn.getTable(tn)) { - // Writing no values will result in no records written. 
- verify(table, () -> { - testChore.persistSnapshotSizes(table, snapshotsWithSizes); - assertEquals(0, count(table)); - }); - - verify(table, () -> { - TableName originatingTable = TableName.valueOf("t1"); - snapshotsWithSizes.put(originatingTable, new SnapshotWithSize("ss1", 1024L)); - snapshotsWithSizes.put(originatingTable, new SnapshotWithSize("ss2", 4096L)); - testChore.persistSnapshotSizes(table, snapshotsWithSizes); - assertEquals(2, count(table)); - assertEquals(1024L, extractSnapshotSize(table, originatingTable, "ss1")); - assertEquals(4096L, extractSnapshotSize(table, originatingTable, "ss2")); - }); - - snapshotsWithSizes.clear(); - verify(table, () -> { - snapshotsWithSizes.put(TableName.valueOf("t1"), new SnapshotWithSize("ss1", 1024L)); - snapshotsWithSizes.put(TableName.valueOf("t2"), new SnapshotWithSize("ss2", 4096L)); - snapshotsWithSizes.put(TableName.valueOf("t3"), new SnapshotWithSize("ss3", 8192L)); - testChore.persistSnapshotSizes(table, snapshotsWithSizes); - assertEquals(3, count(table)); - assertEquals(1024L, extractSnapshotSize(table, TableName.valueOf("t1"), "ss1")); - assertEquals(4096L, extractSnapshotSize(table, TableName.valueOf("t2"), "ss2")); - assertEquals(8192L, extractSnapshotSize(table, TableName.valueOf("t3"), "ss3")); - }); - } - } - - @Test public void testSnapshotsFromTables() throws Exception { TableName tn1 = helper.createTableWithRegions(1); TableName tn2 = helper.createTableWithRegions(1); @@ -260,13 +216,13 @@ public class TestSnapshotQuotaObserverChore { "Expected to see the single snapshot: " + snapshotsToCompute, 1, snapshotsToCompute.size()); // Get the size of our snapshot - Multimap snapshotsWithSize = testChore.computeSnapshotSizes( + Map namespaceSnapshotSizes = testChore.computeSnapshotSizes( snapshotsToCompute); - assertEquals(1, snapshotsWithSize.size()); - SnapshotWithSize sws = Iterables.getOnlyElement(snapshotsWithSize.get(tn1)); - assertEquals(snapshotName, sws.getName()); + assertEquals(1, namespaceSnapshotSizes.size()); + Long size = namespaceSnapshotSizes.get(tn1.getNamespaceAsString()); + assertNotNull(size); // The snapshot should take up no space since the table refers to it completely - assertEquals(0, sws.getSize()); + assertEquals(0L, size.longValue()); // Write some more data, flush it, and then major_compact the table helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE); @@ -290,71 +246,133 @@ public class TestSnapshotQuotaObserverChore { snapshotsToCompute = testChore.getSnapshotsToComputeSize(); assertEquals( "Expected to see the single snapshot: " + snapshotsToCompute, 1, snapshotsToCompute.size()); - snapshotsWithSize = testChore.computeSnapshotSizes( + namespaceSnapshotSizes = testChore.computeSnapshotSizes( snapshotsToCompute); - assertEquals(1, snapshotsWithSize.size()); - sws = Iterables.getOnlyElement(snapshotsWithSize.get(tn1)); - assertEquals(snapshotName, sws.getName()); + assertEquals(1, namespaceSnapshotSizes.size()); + size = namespaceSnapshotSizes.get(tn1.getNamespaceAsString()); + assertNotNull(size); // The snapshot should take up the size the table originally took up - assertEquals(lastSeenSize.get().longValue(), sws.getSize()); + assertEquals(lastSeenSize.get().longValue(), size.longValue()); } @Test public void testPersistingSnapshotsForNamespaces() throws Exception { - Multimap snapshotsWithSizes = HashMultimap.create(); TableName tn1 = TableName.valueOf("ns1:tn1"); TableName tn2 = TableName.valueOf("ns1:tn2"); TableName tn3 = TableName.valueOf("ns2:tn1"); TableName tn4 = 
TableName.valueOf("ns2:tn2"); TableName tn5 = TableName.valueOf("tn1"); + // Shim in a custom factory to avoid computing snapshot sizes. + FileArchiverNotifierFactory test = new FileArchiverNotifierFactory() { + Map tableToSize = ImmutableMap.of( + tn1, 1024L, tn2, 1024L, tn3, 512L, tn4, 1024L, tn5, 3072L); - snapshotsWithSizes.put(tn1, new SnapshotWithSize("", 1024L)); - snapshotsWithSizes.put(tn2, new SnapshotWithSize("", 1024L)); - snapshotsWithSizes.put(tn3, new SnapshotWithSize("", 512L)); - snapshotsWithSizes.put(tn4, new SnapshotWithSize("", 1024L)); - snapshotsWithSizes.put(tn5, new SnapshotWithSize("", 3072L)); - - Map nsSizes = testChore.groupSnapshotSizesByNamespace(snapshotsWithSizes); - assertEquals(3, nsSizes.size()); - assertEquals(2048L, (long) nsSizes.get("ns1")); - assertEquals(1536L, (long) nsSizes.get("ns2")); - assertEquals(3072L, (long) nsSizes.get(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR)); + @Override + public FileArchiverNotifier get( + Connection conn, Configuration conf, FileSystem fs, TableName tn) { + return new FileArchiverNotifier() { + @Override public void addArchivedFiles(Set> fileSizes) + throws IOException {} + + @Override + public long computeAndStoreSnapshotSizes(Collection currentSnapshots) + throws IOException { + return tableToSize.get(tn); + } + }; + } + }; + try { + FileArchiverNotifierFactoryImpl.setInstance(test); + + Multimap snapshotsToCompute = HashMultimap.create(); + snapshotsToCompute.put(tn1, ""); + snapshotsToCompute.put(tn2, ""); + snapshotsToCompute.put(tn3, ""); + snapshotsToCompute.put(tn4, ""); + snapshotsToCompute.put(tn5, ""); + Map nsSizes = testChore.computeSnapshotSizes(snapshotsToCompute); + assertEquals(3, nsSizes.size()); + assertEquals(2048L, (long) nsSizes.get("ns1")); + assertEquals(1536L, (long) nsSizes.get("ns2")); + assertEquals(3072L, (long) nsSizes.get(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR)); + } finally { + FileArchiverNotifierFactoryImpl.reset(); + } } - private long count(Table t) throws IOException { - try (ResultScanner rs = t.getScanner(new Scan())) { - long sum = 0; - for (Result r : rs) { - while (r.advance()) { - sum++; + @Test + public void testBucketingFilesToSnapshots() throws Exception { + // Create a table and set a quota + TableName tn1 = helper.createTableWithRegions(1); + admin.setQuota(QuotaSettingsFactory.limitTableSpace( + tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS)); + + // Write some data and flush it + helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE); + admin.flush(tn1); + + final AtomicReference lastSeenSize = new AtomicReference<>(); + // Wait for the Master chore to run to see the usage (with a fudge factor) + TEST_UTIL.waitFor(30_000, new SpaceQuotaSnapshotPredicate(conn, tn1) { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + lastSeenSize.set(snapshot.getUsage()); + return snapshot.getUsage() > 230L * SpaceQuotaHelperForTests.ONE_KILOBYTE; + } + }); + + // Create a snapshot on the table + final String snapshotName1 = tn1 + "snapshot1"; + admin.snapshot(new SnapshotDescription(snapshotName1, tn1, SnapshotType.SKIPFLUSH)); + // Major compact the table to force a rewrite + TEST_UTIL.compact(tn1, true); + + // Make sure that the snapshot owns the size + final Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME); + TEST_UTIL.waitFor(30_000, new Predicate() { + @Override + public boolean evaluate() throws Exception { + Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName1); + 
Result r = quotaTable.get(g); + if (r == null || r.isEmpty()) { + return false; } + r.advance(); + Cell c = r.current(); + return lastSeenSize.get() == QuotaTableUtil.parseSnapshotSize(c); } - return sum; - } - } + }); - private long extractSnapshotSize( - Table quotaTable, TableName tn, String snapshot) throws IOException { - Get g = QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot); - Result r = quotaTable.get(g); - assertNotNull(r); - CellScanner cs = r.cellScanner(); - cs.advance(); - Cell c = cs.current(); - assertNotNull(c); - return QuotaTableUtil.extractSnapshotSize( - c.getValueArray(), c.getValueOffset(), c.getValueLength()); - } + // Create another snapshot on the table + final String snapshotName2 = tn1 + "snapshot2"; + admin.snapshot(new SnapshotDescription(snapshotName2, tn1, SnapshotType.SKIPFLUSH)); + // Major compact the table to force a rewrite + TEST_UTIL.compact(tn1, true); - private void verify(Table t, IOThrowingRunnable test) throws IOException { - admin.disableTable(t.getName()); - admin.truncateTable(t.getName(), false); - test.run(); - } + // Make sure that the snapshot owns the size + TEST_UTIL.waitFor(30_000, new Predicate() { + @Override + public boolean evaluate() throws Exception { + Get g = QuotaTableUtil.makeGetForSnapshotSize(tn1, snapshotName2); + Result r = quotaTable.get(g); + if (r == null || r.isEmpty()) { + return false; + } + r.advance(); + Cell c = r.current(); + return lastSeenSize.get() == QuotaTableUtil.parseSnapshotSize(c); + } + }); - @FunctionalInterface - private interface IOThrowingRunnable { - void run() throws IOException; + Get g = QuotaTableUtil.createGetNamespaceSnapshotSize(tn1.getNamespaceAsString()); + Result r = quotaTable.get(g); + assertNotNull(r); + assertFalse(r.isEmpty()); + r.advance(); + long size = QuotaTableUtil.parseSnapshotSize(r.current()); + // Two snapshots of equal size. + assertEquals(lastSeenSize.get() * 2, size); } /** -- 2.12.2
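
[Reviewer note] For anyone who wants to poke at the new API outside the test harness, below is a minimal sketch of driving a FileArchiverNotifier directly. The constructor and method signatures mirror what TestFileArchiverNotifierImpl above exercises; the connection setup, table name, file name, and sizes are placeholders, and the class is placed in the org.apache.hadoop.hbase.quotas package in case some of these types are package-private.

package org.apache.hadoop.hbase.quotas;

import java.util.Collections;
import java.util.Map.Entry;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;

public class FileArchiverNotifierSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      FileSystem fs = FileSystem.get(conf);
      TableName tn = TableName.valueOf("t1"); // placeholder table name

      // Same constructor the tests use: one notifier per table.
      FileArchiverNotifier notifier = new FileArchiverNotifierImpl(conn, conf, fs, tn);

      // Incremental path: report that a store file (identified by name) was moved to the
      // archive, along with its size, so a snapshot referencing it can "own" that size.
      Entry<String, Long> archivedFile = Maps.immutableEntry("0123abcd", 2048L); // placeholder
      notifier.addArchivedFiles(ImmutableSet.of(archivedFile));

      // Full path: recompute and persist the sizes of the given snapshots, returning the
      // total size attributed to them.
      long totalSnapshotSize =
          notifier.computeAndStoreSnapshotSizes(Collections.singletonList("snapshot1"));
      System.out.println("Total snapshot size for " + tn + ": " + totalSnapshotSize);
    }
  }
}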
[Recovered content of the flattened javadoc table from QuotaTableUtil.java, describing the row layout of the quota table. The old three-column header has been folded into the new four-column one; data rows changed by the patch are listed in both their old and new forms.]

ROW-KEY          FAM/QUAL              DATA                      DESC
n.<namespace>    q:s                   <global-quotas>
n.<namespace>    u:p                   <namespace-quota policy>
n.<namespace>    u:s                   <SpaceQuotaSnapshot>
n.<namespace>    u:ss                  <SpaceQuotaSnapshot>      The size of all snapshots against tables in the namespace
t.<table>        q:s                   <global-quotas>
t.<table>        u:p                   <table-quota policy>
t.<table>        u:ss.<snapshot name>  <SpaceQuotaSnapshot>
t.<table>        u:ss.<snapshot name>  <SpaceQuotaSnapshot>      The size of a snapshot against a table
u.<user>         q:s                   <global-quotas>
u.<user>         q:s.<table>           <table-quotas>
u.<user>         q:s.<ns>              <namespace-quotas>
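
[Reviewer note] As a companion to the table above, here is a rough sketch of reading those snapshot-size cells back out of the quota table, using only QuotaTableUtil/QuotaUtil calls that the tests in this patch already exercise. Table and snapshot names are placeholders, and the class sits in the quotas package in case any of the helpers are package-private.

package org.apache.hadoop.hbase.quotas;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

public class ReadSnapshotSizesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("t1");  // placeholder table
    String snapshot = "snapshot1";           // placeholder snapshot name
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
      // t.<table> u:ss.<snapshot name> -> size of one snapshot against the table
      Get perSnapshot = QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot);
      Result r = quotaTable.get(perSnapshot);
      if (!r.isEmpty() && r.advance()) {
        System.out.println("Snapshot size: " + QuotaTableUtil.parseSnapshotSize(r.current()));
      }
      // n.<namespace> u:ss -> size of all snapshots against tables in the namespace
      Get perNamespace = QuotaTableUtil.createGetNamespaceSnapshotSize(tn.getNamespaceAsString());
      r = quotaTable.get(perNamespace);
      if (!r.isEmpty() && r.advance()) {
        System.out.println("Namespace snapshot size: "
            + QuotaTableUtil.parseSnapshotSize(r.current()));
      }
    }
  }
}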