---
 .../java/org/apache/hadoop/hbase/client/Admin.java |    8 +
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java |   44 +
 .../hadoop/hbase/protobuf/RequestConverter.java    |   12 +
 .../hbase/protobuf/generated/AdminProtos.java      | 1073 +++++++++++++++++++-
 .../hbase/protobuf/generated/ClientProtos.java     |    2 +-
 .../hbase/protobuf/generated/MasterProtos.java     |   30 +-
 .../hbase/protobuf/generated/SnapshotProtos.java   |   24 +-
 hbase-protocol/src/main/protobuf/Admin.proto       |   10 +
 .../hadoop/hbase/regionserver/HRegionServer.java   |   10 +
 .../hadoop/hbase/regionserver/RSRpcServices.java   |   14 +
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |   18 +
 .../hadoop/hbase/TestRefreshRegionHFiles.java      |   98 ++
 .../hadoop/hbase/master/MockRegionServer.java      |    9 +
 .../replication/regionserver/TestReplicator.java   |    7 +
 hbase-shell/src/main/ruby/hbase/admin.rb           |    6 +
 hbase-shell/src/main/ruby/shell.rb                 |    1 +
 .../src/main/ruby/shell/commands/refresh_hfiles.rb |   38 +
 17 files changed, 1338 insertions(+), 66 deletions(-)
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestRefreshRegionHFiles.java
 create mode 100644 hbase-shell/src/main/ruby/shell/commands/refresh_hfiles.rb

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 5b3744a..f4591dc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -725,6 +725,14 @@ public interface Admin extends Abortable, Closeable {
   boolean isBalancerEnabled() throws IOException;
 
   /**
+   * Refresh HFiles for the table.
+   *
+   * @param tableName table to refresh HFiles for
+   * @throws IOException if a remote or network exception occurs
+   */
+  void refreshHFiles(final TableName tableName) throws IOException;
+
+  /**
    * Invoke region normalizer. Can NOT run for various reasons. Check logs.
    *
    * @return True if region normalizer ran, false otherwise.
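For orientation before the implementation hunks: a minimal client-side sketch of how the new Admin method is meant to be invoked once the patch is applied. The configuration and table name below are illustrative, not part of the patch; ConnectionFactory, Connection, and Admin are the standard HBase client entry points.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class RefreshHFilesExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Admin admin = connection.getAdmin()) {
        // Ask every region server hosting a region of the table to re-scan
        // its store directories so HFiles added out of band become visible.
        admin.refreshHFiles(TableName.valueOf("t1"));
      }
    }
  }

The shell wrapper listed in the diffstat (refresh_hfiles.rb, not shown in this excerpt) presumably exposes the same call from the HBase shell, along the lines of refresh_hfiles 'TABLENAME'.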
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 2cbeb9a..4f99764 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
@@ -2435,6 +2436,49 @@ public class HBaseAdmin implements Admin {
   }
 
   /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void refreshHFiles(final TableName tableName) throws IOException {
+    ZooKeeperWatcher zookeeper = null;
+    try {
+      checkTableExists(tableName);
+      zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
+        new ThrowableAbortable());
+      List<Pair<HRegionInfo, ServerName>> pairs =
+        MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
+      for (Pair<HRegionInfo, ServerName> pair: pairs) {
+        if (pair.getFirst().isOffline()) continue;
+        if (pair.getSecond() == null) continue;
+        try {
+          refreshHFiles(pair.getSecond(), pair.getFirst());
+        } catch (NotServingRegionException e) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Trying to refresh HFiles for " + pair.getFirst() + ": "
+              + StringUtils.stringifyException(e));
+          }
+        }
+      }
+    } finally {
+      if (zookeeper != null) {
+        zookeeper.close();
+      }
+    }
+  }
+
+  private void refreshHFiles(final ServerName sn, final HRegionInfo hri) throws IOException {
+    HBaseRpcController controller = rpcControllerFactory.newController();
+    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
+    RefreshRegionHFilesRequest request =
+      RequestConverter.buildRefreshRegionHFilesRequest(hri.getRegionName());
+    try {
+      admin.refreshRegionHFiles(controller, request);
+    } catch (ServiceException se) {
+      throw ProtobufUtil.getRemoteException(se);
+    }
+  }
+
+  /**
    * Invoke region normalizer. Can NOT run for various reasons. Check logs.
    *
    * @return True if region normalizer ran, false otherwise.
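The region-server side of the RPC lives in RSRpcServices.java and HRegionServer.java (14 and 10 lines in the diffstat) and is not reproduced in this excerpt. As a hedged sketch only: assuming the handler delegates to the existing Region#refreshStoreFiles() API that region-replica reads already use, it could look roughly like the following method inside RSRpcServices.

  // Hedged sketch, not the patch's actual hunk; assumes Region#refreshStoreFiles()
  // and the usual RSRpcServices helpers (checkOpen, getRegion) are available.
  @Override
  public RefreshRegionHFilesResponse refreshRegionHFiles(final RpcController controller,
      final RefreshRegionHFilesRequest request) throws ServiceException {
    try {
      checkOpen();
      // Resolve the RegionSpecifier in the request to a local online region.
      Region region = getRegion(request.getRegion());
      // Re-scan the region's store directories and open any new HFiles found.
      region.refreshStoreFiles();
      return RefreshRegionHFilesResponse.getDefaultInstance();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }

Whatever the exact server-side shape, note that the client loop above treats NotServingRegionException as benign: a region that moves while the refresh is in flight is logged and skipped rather than failing the whole table operation.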
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index 8163130..8a7fdad 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileReques
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
@@ -1438,6 +1439,17 @@ public final class RequestConverter {
   }
 
   /**
+   * Creates a protocol buffer RefreshRegionHFilesRequest.
+   *
+   * @return a RefreshRegionHFilesRequest
+   */
+  public static RefreshRegionHFilesRequest buildRefreshRegionHFilesRequest(final byte[]
+      regionName) {
+    RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
+    return RefreshRegionHFilesRequest.newBuilder().setRegion(region).build();
+  }
+
+  /**
    * @see {@link #buildGetClusterStatusRequest}
    */
   private static final GetClusterStatusRequest GET_CLUSTER_STATUS_REQUEST =

diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
index 1c59ea6..eb6faba 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
@@ -22420,6 +22420,905 @@ public final class AdminProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.UpdateConfigurationResponse)
   }
 
+  public interface RefreshRegionHFilesRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.RegionSpecifier region = 1;
+    /**
+     * required .hbase.pb.RegionSpecifier region = 1;
+     */
+    boolean hasRegion();
+    /**
+     * required .hbase.pb.RegionSpecifier region = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion();
+    /**
+     * required .hbase.pb.RegionSpecifier region = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RefreshRegionHFilesRequest}
+   */
+  public static final class RefreshRegionHFilesRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements RefreshRegionHFilesRequestOrBuilder {
+    // Use RefreshRegionHFilesRequest.newBuilder() to construct.
+ private RefreshRegionHFilesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RefreshRegionHFilesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RefreshRegionHFilesRequest defaultInstance; + public static RefreshRegionHFilesRequest getDefaultInstance() { + return defaultInstance; + } + + public RefreshRegionHFilesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RefreshRegionHFilesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = region_.toBuilder(); + } + region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(region_); + region_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_RefreshRegionHFilesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_RefreshRegionHFilesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RefreshRegionHFilesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RefreshRegionHFilesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.RegionSpecifier region = 1; + public static final int REGION_FIELD_NUMBER = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + return region_; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + return region_; + } + + private void initFields() { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegion()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, region_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, region_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest) obj; + + boolean result = true; + result = result && (hasRegion() == other.hasRegion()); + if (hasRegion()) { + result = result && getRegion() + .equals(other.getRegion()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegion()) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RefreshRegionHFilesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_RefreshRegionHFilesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_RefreshRegionHFilesRequest_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_RefreshRegionHFilesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionBuilder_ == null) { + result.region_ = region_; + } else { + result.region_ = regionBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest.getDefaultInstance()) return this; + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegion()) { + + return false; + } + if (!getRegion().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.RegionSpecifier region = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + if (regionBuilder_ == null) { + return region_; + } else { + return regionBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + region_ = value; + onChanged(); + } else { + regionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + region_ = builderForValue.build(); + onChanged(); + } else { + regionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { + region_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial(); + } else { + region_ = value; + } + onChanged(); + } else { + regionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + onChanged(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); 
+ return getRegionFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilder(); + } else { + return region_; + } + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + region_, + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.RefreshRegionHFilesRequest) + } + + static { + defaultInstance = new RefreshRegionHFilesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RefreshRegionHFilesRequest) + } + + public interface RefreshRegionHFilesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.RefreshRegionHFilesResponse} + */ + public static final class RefreshRegionHFilesResponse extends + com.google.protobuf.GeneratedMessage + implements RefreshRegionHFilesResponseOrBuilder { + // Use RefreshRegionHFilesResponse.newBuilder() to construct. 
+ private RefreshRegionHFilesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RefreshRegionHFilesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RefreshRegionHFilesResponse defaultInstance; + public static RefreshRegionHFilesResponse getDefaultInstance() { + return defaultInstance; + } + + public RefreshRegionHFilesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RefreshRegionHFilesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_RefreshRegionHFilesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_RefreshRegionHFilesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RefreshRegionHFilesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RefreshRegionHFilesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += 
getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RefreshRegionHFilesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_RefreshRegionHFilesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_RefreshRegionHFilesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_RefreshRegionHFilesResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse(this); + 
onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.RefreshRegionHFilesResponse) + } + + static { + defaultInstance = new RefreshRegionHFilesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RefreshRegionHFilesResponse) + } + /** * Protobuf service {@code hbase.pb.AdminService} */ @@ -22564,6 +23463,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc RefreshRegionHFiles(.hbase.pb.RefreshRegionHFilesRequest) returns (.hbase.pb.RefreshRegionHFilesResponse); + */ + public abstract void refreshRegionHFiles( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -22705,6 +23612,14 @@ public final class AdminProtos { impl.updateConfiguration(controller, request, done); } + @java.lang.Override + public void refreshRegionHFiles( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest request, + com.google.protobuf.RpcCallback done) { + impl.refreshRegionHFiles(controller, request, done); + } + }; } @@ -22761,6 +23676,8 @@ public final class AdminProtos { return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); case 16: return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request); + case 17: + return impl.refreshRegionHFiles(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -22809,6 +23726,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); case 16: return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -22857,6 +23776,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23001,6 +23922,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc RefreshRegionHFiles(.hbase.pb.RefreshRegionHFilesRequest) returns (.hbase.pb.RefreshRegionHFilesResponse); + */ + public abstract void refreshRegionHFiles( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -23108,6 +24037,11 @@ public final class AdminProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 17: + this.refreshRegionHFiles(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -23156,6 +24090,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23204,6 +24140,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23479,6 +24417,21 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance())); } + + public void refreshRegionHFiles( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(17), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -23571,6 +24524,11 @@ public final class AdminProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse refreshRegionHFiles( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -23783,6 +24741,18 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse refreshRegionHFiles( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(17), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.AdminService) @@ -23968,6 +24938,16 @@ public final class AdminProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_UpdateConfigurationResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RefreshRegionHFilesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RefreshRegionHFilesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RefreshRegionHFilesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RefreshRegionHFilesResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -24051,44 +25031,49 @@ public final class AdminProtos { "ui_port\030\002 \001(\r\"B\n\025GetServerInfoResponse\022)" + "\n\013server_info\030\001 \002(\0132\024.hbase.pb.ServerInf" + "o\"\034\n\032UpdateConfigurationRequest\"\035\n\033Updat" + - "eConfigurationResponse2\207\013\n\014AdminService\022" + - "P\n\rGetRegionInfo\022\036.hbase.pb.GetRegionInf" + - "oRequest\032\037.hbase.pb.GetRegionInfoRespons" + - "e\022M\n\014GetStoreFile\022\035.hbase.pb.GetStoreFil" + - "eRequest\032\036.hbase.pb.GetStoreFileResponse" + - "\022V\n\017GetOnlineRegion\022 .hbase.pb.GetOnline", - "RegionRequest\032!.hbase.pb.GetOnlineRegion" + - "Response\022G\n\nOpenRegion\022\033.hbase.pb.OpenRe" + - "gionRequest\032\034.hbase.pb.OpenRegionRespons" + - "e\022M\n\014WarmupRegion\022\035.hbase.pb.WarmupRegio" + - "nRequest\032\036.hbase.pb.WarmupRegionResponse" + - "\022J\n\013CloseRegion\022\034.hbase.pb.CloseRegionRe" + - "quest\032\035.hbase.pb.CloseRegionResponse\022J\n\013" + - 
"FlushRegion\022\034.hbase.pb.FlushRegionReques" + - "t\032\035.hbase.pb.FlushRegionResponse\022J\n\013Spli" + - "tRegion\022\034.hbase.pb.SplitRegionRequest\032\035.", - "hbase.pb.SplitRegionResponse\022P\n\rCompactR" + - "egion\022\036.hbase.pb.CompactRegionRequest\032\037." + - "hbase.pb.CompactRegionResponse\022M\n\014MergeR" + - "egions\022\035.hbase.pb.MergeRegionsRequest\032\036." + - "hbase.pb.MergeRegionsResponse\022\\\n\021Replica" + - "teWALEntry\022\".hbase.pb.ReplicateWALEntryR" + - "equest\032#.hbase.pb.ReplicateWALEntryRespo" + - "nse\022Q\n\006Replay\022\".hbase.pb.ReplicateWALEnt" + - "ryRequest\032#.hbase.pb.ReplicateWALEntryRe" + - "sponse\022P\n\rRollWALWriter\022\036.hbase.pb.RollW", - "ALWriterRequest\032\037.hbase.pb.RollWALWriter" + - "Response\022P\n\rGetServerInfo\022\036.hbase.pb.Get" + - "ServerInfoRequest\032\037.hbase.pb.GetServerIn" + - "foResponse\022G\n\nStopServer\022\033.hbase.pb.Stop" + - "ServerRequest\032\034.hbase.pb.StopServerRespo" + - "nse\022_\n\022UpdateFavoredNodes\022#.hbase.pb.Upd" + - "ateFavoredNodesRequest\032$.hbase.pb.Update" + - "FavoredNodesResponse\022b\n\023UpdateConfigurat" + - "ion\022$.hbase.pb.UpdateConfigurationReques" + - "t\032%.hbase.pb.UpdateConfigurationResponse", - "BA\n*org.apache.hadoop.hbase.protobuf.gen" + - "eratedB\013AdminProtosH\001\210\001\001\240\001\001" + "eConfigurationResponse\"G\n\032RefreshRegionH" + + "FilesRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb." + + "RegionSpecifier\"\035\n\033RefreshRegionHFilesRe" + + "sponse2\353\013\n\014AdminService\022P\n\rGetRegionInfo" + + "\022\036.hbase.pb.GetRegionInfoRequest\032\037.hbase" + + ".pb.GetRegionInfoResponse\022M\n\014GetStoreFil", + "e\022\035.hbase.pb.GetStoreFileRequest\032\036.hbase" + + ".pb.GetStoreFileResponse\022V\n\017GetOnlineReg" + + "ion\022 .hbase.pb.GetOnlineRegionRequest\032!." + + "hbase.pb.GetOnlineRegionResponse\022G\n\nOpen" + + "Region\022\033.hbase.pb.OpenRegionRequest\032\034.hb" + + "ase.pb.OpenRegionResponse\022M\n\014WarmupRegio" + + "n\022\035.hbase.pb.WarmupRegionRequest\032\036.hbase" + + ".pb.WarmupRegionResponse\022J\n\013CloseRegion\022" + + "\034.hbase.pb.CloseRegionRequest\032\035.hbase.pb" + + ".CloseRegionResponse\022J\n\013FlushRegion\022\034.hb", + "ase.pb.FlushRegionRequest\032\035.hbase.pb.Flu" + + "shRegionResponse\022J\n\013SplitRegion\022\034.hbase." + + "pb.SplitRegionRequest\032\035.hbase.pb.SplitRe" + + "gionResponse\022P\n\rCompactRegion\022\036.hbase.pb" + + ".CompactRegionRequest\032\037.hbase.pb.Compact" + + "RegionResponse\022M\n\014MergeRegions\022\035.hbase.p" + + "b.MergeRegionsRequest\032\036.hbase.pb.MergeRe" + + "gionsResponse\022\\\n\021ReplicateWALEntry\022\".hba" + + "se.pb.ReplicateWALEntryRequest\032#.hbase.p" + + "b.ReplicateWALEntryResponse\022Q\n\006Replay\022\".", + "hbase.pb.ReplicateWALEntryRequest\032#.hbas" + + "e.pb.ReplicateWALEntryResponse\022P\n\rRollWA" + + "LWriter\022\036.hbase.pb.RollWALWriterRequest\032" + + "\037.hbase.pb.RollWALWriterResponse\022P\n\rGetS" + + "erverInfo\022\036.hbase.pb.GetServerInfoReques" + + "t\032\037.hbase.pb.GetServerInfoResponse\022G\n\nSt" + + "opServer\022\033.hbase.pb.StopServerRequest\032\034." 
+ + "hbase.pb.StopServerResponse\022_\n\022UpdateFav" + + "oredNodes\022#.hbase.pb.UpdateFavoredNodesR" + + "equest\032$.hbase.pb.UpdateFavoredNodesResp", + "onse\022b\n\023UpdateConfiguration\022$.hbase.pb.U" + + "pdateConfigurationRequest\032%.hbase.pb.Upd" + + "ateConfigurationResponse\022b\n\023RefreshRegio" + + "nHFiles\022$.hbase.pb.RefreshRegionHFilesRe" + + "quest\032%.hbase.pb.RefreshRegionHFilesResp" + + "onseBA\n*org.apache.hadoop.hbase.protobuf" + + ".generatedB\013AdminProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -24311,6 +25296,18 @@ public final class AdminProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_UpdateConfigurationResponse_descriptor, new java.lang.String[] { }); + internal_static_hbase_pb_RefreshRegionHFilesRequest_descriptor = + getDescriptor().getMessageTypes().get(34); + internal_static_hbase_pb_RefreshRegionHFilesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RefreshRegionHFilesRequest_descriptor, + new java.lang.String[] { "Region", }); + internal_static_hbase_pb_RefreshRegionHFilesResponse_descriptor = + getDescriptor().getMessageTypes().get(35); + internal_static_hbase_pb_RefreshRegionHFilesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RefreshRegionHFilesResponse_descriptor, + new java.lang.String[] { }); return null; } }; diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index 12950f7..4bd3f5f 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -22646,7 +22646,7 @@ public final class ClientProtos { * */ private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder> + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder> getCursorFieldBuilder() { if (cursorBuilder_ == null) { cursorBuilder_ = new com.google.protobuf.SingleFieldBuilder< diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 2661dc1..793c7b6 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -36539,7 +36539,7 @@ public final class MasterProtos { public final boolean isInitialized() { if (!hasCleanerChoreRan()) { - + return false; } return true; @@ -36981,7 +36981,7 @@ public final class MasterProtos { public final boolean isInitialized() { if (!hasOn()) { - + return false; } return true; @@ -38195,7 +38195,7 @@ public final class MasterProtos { public final boolean isInitialized() { if (!hasValue()) { - + return false; } 
return true; @@ -38801,7 +38801,7 @@ public final class MasterProtos { * required .hbase.pb.SnapshotDescription snapshot = 1; */ private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder> + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder> getSnapshotFieldBuilder() { if (snapshotBuilder_ == null) { snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -39612,7 +39612,7 @@ public final class MasterProtos { /** * repeated .hbase.pb.SnapshotDescription snapshots = 1; */ - java.util.List + java.util.List getSnapshotsList(); /** * repeated .hbase.pb.SnapshotDescription snapshots = 1; @@ -39625,7 +39625,7 @@ public final class MasterProtos { /** * repeated .hbase.pb.SnapshotDescription snapshots = 1; */ - java.util.List + java.util.List getSnapshotsOrBuilderList(); /** * repeated .hbase.pb.SnapshotDescription snapshots = 1; @@ -39746,7 +39746,7 @@ public final class MasterProtos { /** * repeated .hbase.pb.SnapshotDescription snapshots = 1; */ - public java.util.List + public java.util.List getSnapshotsOrBuilderList() { return snapshots_; } @@ -40270,7 +40270,7 @@ public final class MasterProtos { /** * repeated .hbase.pb.SnapshotDescription snapshots = 1; */ - public java.util.List + public java.util.List getSnapshotsOrBuilderList() { if (snapshotsBuilder_ != null) { return snapshotsBuilder_.getMessageOrBuilderList(); @@ -40296,12 +40296,12 @@ public final class MasterProtos { /** * repeated .hbase.pb.SnapshotDescription snapshots = 1; */ - public java.util.List + public java.util.List getSnapshotsBuilderList() { return getSnapshotsFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder> + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder> getSnapshotsFieldBuilder() { if (snapshotsBuilder_ == null) { snapshotsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< @@ -40863,7 +40863,7 @@ public final class MasterProtos { * required .hbase.pb.SnapshotDescription snapshot = 1; */ private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder> + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder> getSnapshotFieldBuilder() { if (snapshotBuilder_ == null) { snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -41819,7 
@@ -41819,7 +41819,7 @@ public final class MasterProtos {
        * <code>required .hbase.pb.SnapshotDescription snapshot = 1;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder> 
+          org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder>
           getSnapshotFieldBuilder() {
         if (snapshotBuilder_ == null) {
           snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -42757,7 +42757,7 @@ public final class MasterProtos {
        * <code>optional .hbase.pb.SnapshotDescription snapshot = 1;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder> 
+          org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder>
           getSnapshotFieldBuilder() {
         if (snapshotBuilder_ == null) {
           snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -43404,7 +43404,7 @@ public final class MasterProtos {
        * <code>optional .hbase.pb.SnapshotDescription snapshot = 2;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder> 
+          org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder>
           getSnapshotFieldBuilder() {
         if (snapshotBuilder_ == null) {
           snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -43961,7 +43961,7 @@ public final class MasterProtos {
        * <code>optional .hbase.pb.SnapshotDescription snapshot = 1;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder> 
+          org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder>
           getSnapshotFieldBuilder() {
         if (snapshotBuilder_ == null) {
           snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder<
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
index 2e11b4a..0b54505 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
@@ -365,7 +365,7 @@ public final class SnapshotProtos {
       if (ref instanceof java.lang.String) {
         return (java.lang.String) ref;
       } else {
-        com.google.protobuf.ByteString bs = 
+        com.google.protobuf.ByteString bs =
             (com.google.protobuf.ByteString) ref;
         java.lang.String s = bs.toStringUtf8();
         if (bs.isValidUtf8()) {
@@ -381,7 +381,7 @@ public final class SnapshotProtos {
         getNameBytes() {
       java.lang.Object ref = name_;
       if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b = 
+        com.google.protobuf.ByteString b =
             com.google.protobuf.ByteString.copyFromUtf8(
                 (java.lang.String) ref);
         name_ = b;
@@ -416,7 +416,7 @@ public final class SnapshotProtos {
       if (ref instanceof java.lang.String) {
         return (java.lang.String) ref;
       } else {
-        com.google.protobuf.ByteString bs = 
+        com.google.protobuf.ByteString bs =
             (com.google.protobuf.ByteString) ref;
         java.lang.String s = bs.toStringUtf8();
         if (bs.isValidUtf8()) {
@@ -436,7 +436,7 @@ public final class SnapshotProtos {
         getTableBytes() {
       java.lang.Object ref = table_;
       if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b = 
+        com.google.protobuf.ByteString b =
             com.google.protobuf.ByteString.copyFromUtf8(
                 (java.lang.String) ref);
         table_ = b;
@@ -511,7 +511,7 @@ public final class SnapshotProtos {
       if (ref instanceof java.lang.String) {
         return (java.lang.String) ref;
       } else {
-        com.google.protobuf.ByteString bs = 
+        com.google.protobuf.ByteString bs =
             (com.google.protobuf.ByteString) ref;
         java.lang.String s = bs.toStringUtf8();
         if (bs.isValidUtf8()) {
@@ -527,7 +527,7 @@ public final class SnapshotProtos {
         getOwnerBytes() {
       java.lang.Object ref = owner_;
       if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b = 
+        com.google.protobuf.ByteString b =
             com.google.protobuf.ByteString.copyFromUtf8(
                 (java.lang.String) ref);
         owner_ = b;
@@ -989,12 +989,12 @@ public final class SnapshotProtos {
 
       public final boolean isInitialized() {
         if (!hasName()) {
-          
+
           return false;
         }
         if (hasUsersAndPermissions()) {
           if (!getUsersAndPermissions().isInitialized()) {
-            
+
             return false;
           }
         }
@@ -1049,7 +1049,7 @@ public final class SnapshotProtos {
           getNameBytes() {
         java.lang.Object ref = name_;
         if (ref instanceof String) {
-          com.google.protobuf.ByteString b = 
+          com.google.protobuf.ByteString b =
               com.google.protobuf.ByteString.copyFromUtf8(
                   (java.lang.String) ref);
           name_ = b;
@@ -1135,7 +1135,7 @@ public final class SnapshotProtos {
           getTableBytes() {
         java.lang.Object ref = table_;
         if (ref instanceof String) {
-          com.google.protobuf.ByteString b = 
+          com.google.protobuf.ByteString b =
               com.google.protobuf.ByteString.copyFromUtf8(
                   (java.lang.String) ref);
           table_ = b;
@@ -1323,7 +1323,7 @@ public final class SnapshotProtos {
           getOwnerBytes() {
         java.lang.Object ref = owner_;
         if (ref instanceof String) {
-          com.google.protobuf.ByteString b = 
+          com.google.protobuf.ByteString b =
               com.google.protobuf.ByteString.copyFromUtf8(
                   (java.lang.String) ref);
           owner_ = b;
@@ -1472,7 +1472,7 @@ public final class SnapshotProtos {
        * <code>optional .hbase.pb.UsersAndPermissions users_and_permissions = 7;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder> 
+          org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder>
           getUsersAndPermissionsFieldBuilder() {
         if (usersAndPermissionsBuilder_ == null) {
           usersAndPermissionsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto
index a1905a4..48c153b 100644
--- a/hbase-protocol/src/main/protobuf/Admin.proto
+++ b/hbase-protocol/src/main/protobuf/Admin.proto
@@ -256,6 +256,13 @@ message UpdateConfigurationRequest {
 message UpdateConfigurationResponse {
 }
 
+message RefreshRegionHFilesRequest {
+  required RegionSpecifier region = 1;
+}
+
+message RefreshRegionHFilesResponse {
+}
+
 service AdminService {
   rpc GetRegionInfo(GetRegionInfoRequest)
     returns(GetRegionInfoResponse);
@@ -307,4 +314,7 @@ service AdminService {
 
   rpc UpdateConfiguration(UpdateConfigurationRequest)
     returns(UpdateConfigurationResponse);
+
+  rpc RefreshRegionHFiles(RefreshRegionHFilesRequest)
+    returns(RefreshRegionHFilesResponse);
 }
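Note on the wire contract: the request carries only a RegionSpecifier, so each RPC addresses a single region and table-level fan-out stays on the client side. The patch also adds a RequestConverter.buildRefreshRegionHFilesRequest helper whose hunk is not reproduced in this excerpt; a minimal sketch of what such a builder looks like, assuming the existing RequestConverter.buildRegionSpecifier helper, is:

  // Hedged sketch only, not the patch's actual helper body.
  import org.apache.hadoop.hbase.protobuf.RequestConverter;
  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest;
  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;

  public final class RefreshRequestSketch {
    // Wraps a region name in the RegionSpecifier envelope used by the other
    // Admin RPCs, then sets it as the single required field of the request.
    public static RefreshRegionHFilesRequest build(byte[] regionName) {
      RegionSpecifier region = RequestConverter.buildRegionSpecifier(
          RegionSpecifierType.REGION_NAME, regionName);
      return RefreshRegionHFilesRequest.newBuilder().setRegion(region).build();
    }
  }

The empty response message keeps the RPC a simple acknowledgement; any failure surfaces as a ServiceException instead of a payload.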
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 107e1c3..a51f5bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -3478,6 +3478,16 @@ public class HRegionServer extends HasThread implements
     configurationManager.notifyAllObservers(conf);
   }
 
+  public void refreshRegionHFiles(Region region) {
+    try {
+      for (Store store : region.getStores()) {
+        store.refreshStoreFiles();
+      }
+    } catch (IOException ioe) {
+      LOG.warn("Exception while trying to refresh store files: ", ioe);
+    }
+  }
+
   @Override
   public HeapMemoryManager getHeapMemoryManager() {
     return hMemManager;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 4071fed..a4a8f34 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -128,6 +128,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
@@ -3131,4 +3133,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     }
     return UpdateConfigurationResponse.getDefaultInstance();
   }
+
+  @Override
+  public RefreshRegionHFilesResponse refreshRegionHFiles(RpcController controller,
+      RefreshRegionHFilesRequest request)
+      throws ServiceException {
+    try {
+      this.regionServer.refreshRegionHFiles(getRegion(request.getRegion()));
+    } catch (Exception e) {
+      throw new ServiceException(e);
+    }
+    return RefreshRegionHFilesResponse.newBuilder().build();
+  }
 }
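The server side is deliberately thin: RSRpcServices resolves the RegionSpecifier to an online Region, and HRegionServer calls Store.refreshStoreFiles() on each of the region's stores, which re-lists the store directory and opens any HFiles that appeared there out of band (for example, files copied directly into HDFS). From a client the whole round trip is one Admin call; a minimal usage sketch, with a hypothetical table name and standard HBase 1.x connection setup, follows.

  // Hedged usage sketch; "my_table" is a placeholder.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class RefreshHFilesUsage {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Admin admin = connection.getAdmin()) {
        // Fans out RefreshRegionHFiles RPCs to every server hosting an online
        // region of the table, making out-of-band HFiles visible to reads.
        admin.refreshHFiles(TableName.valueOf("my_table"));
      }
    }
  }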
+ */
+
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.HFileTestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+
+@Category(MediumTests.class)
+public class TestRefreshRegionHFiles {
+  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
+  private static final int NUM_MASTER = 1;
+  private static final int NUM_RS = 2;
+  private static final TableName TABLE_NAME = TableName.valueOf("testRefreshRegionHFiles");
+  private static final byte[] FAMILY = Bytes.toBytes("family");
+  private static final byte[] QUALIFIER = Bytes.toBytes("qualifier");
+  private static final byte[][] SPLIT_KEY = new byte[][] { Bytes.toBytes("10") };
+  private static final int NUM_ROWS = 5;
+  private static final String HFILE_NAME = "123abcdef";
+
+  private static MiniHBaseCluster cluster;
+  private static HRegionServer rs1, rs2;
+  private static HTableDescriptor desc;
+  private static Admin hbaseAdmin;
+  private static HTable table;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    cluster = HTU.startMiniCluster(NUM_MASTER, NUM_RS);
+    rs1 = cluster.getRegionServer(0);
+    rs2 = cluster.getRegionServer(1);
+
+    // Create table
+    desc = new HTableDescriptor(TABLE_NAME);
+    desc.addFamily(new HColumnDescriptor(FAMILY));
+    hbaseAdmin = cluster.getMaster().getConnection().getAdmin();
+    hbaseAdmin.createTable(desc, SPLIT_KEY);
+    table = new HTable(HTU.getConfiguration(), TABLE_NAME);
+
+    // this will create 2 regions spread across slaves
+    HTU.loadNumericRows(table, FAMILY, 1, 20);
+    HTU.flush(TABLE_NAME);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    HTU.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testRefreshRegionHFiles() throws Exception {
+    Path tableDir = desc.getTableDir(HTU.getDefaultRootDirPath(), TABLE_NAME.toBytes());
+    for (Region region : cluster.getRegions(TABLE_NAME)) {
+      Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
+      Path familyDir = new Path(regionDir, Bytes.toString(FAMILY));
+      HFileTestUtil.createHFile(HTU.getConfiguration(), HTU.getTestFileSystem(),
+          new Path(familyDir, HFILE_NAME), FAMILY, QUALIFIER,
+          Bytes.toBytes("50"), Bytes.toBytes("60"), NUM_ROWS);
+    }
+    assertEquals(2, HTU.getNumHFiles(TABLE_NAME, FAMILY));
+    refreshRegionHFiles(rs1);
+    refreshRegionHFiles(rs2);
+    assertEquals(4, HTU.getNumHFiles(TABLE_NAME, FAMILY));
+  }
+
+  private void refreshRegionHFiles(HRegionServer rs) {
+    for (Region region : rs.getOnlineRegions(TABLE_NAME)) {
+      rs.refreshRegionHFiles(region);
+    }
+  }
+}
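The test's arithmetic: the load plus flush leaves one HFile per region (two regions, from the single split key), and the loop drops one extra file directly into each region's family directory. Because Store only re-lists its directory on refresh, the injected files are invisible to getStorefilesCount() until refreshRegionHFiles runs, which is exactly what the 2-then-4 assertions verify. The directory the test writes into follows the standard HBase on-disk layout, rootDir/data/<namespace>/<table>/<encodedRegionName>/<family>; a hedged sketch of deriving it with FSUtils.getTableDir (the usual helper; the test reaches the same directory via HTableDescriptor.getTableDir) is:

  // Hedged sketch of the path the test targets, not code from the patch.
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.util.FSUtils;

  public final class FamilyDirSketch {
    // rootDir/data/<namespace>/<table>/<encodedRegionName>/<family>
    public static Path familyDir(Path rootDir, TableName table,
        String encodedRegionName, String family) {
      Path tableDir = FSUtils.getTableDir(rootDir, table);
      Path regionDir = new Path(tableDir, encodedRegionName);
      return new Path(regionDir, family);
    }
  }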
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 14bd2fd..b87de29 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -70,6 +70,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RefreshRegionHFilesResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
@@ -649,6 +651,13 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
+  public RefreshRegionHFilesResponse refreshRegionHFiles(RpcController controller,
+      RefreshRegionHFilesRequest request)
+      throws ServiceException {
+    return null;
+  }
+
+  @Override
   public HeapMemoryManager getHeapMemoryManager() {
     return null;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
index 6d15a1b..15efe31 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
@@ -382,6 +382,13 @@ public class TestReplicator extends TestReplicationBase {
         UpdateConfigurationRequest request) throws ServiceException {
       return delegate.updateConfiguration(controller, request);
     }
+
+    @Override
+    public RefreshRegionHFilesResponse refreshRegionHFiles(RpcController controller,
+        RefreshRegionHFilesRequest request)
+        throws ServiceException {
+      return delegate.refreshRegionHFiles(controller, request);
+    }
   }
 
   public class FailureInjectingReplicatorForTest extends ReplicatorForTest {
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 502abe2..9cf5e75 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -170,6 +170,12 @@ module Hbase
     end
 
     #----------------------------------------------------------------------------------------------
+    # Refresh HFiles for the table
+    def refresh_hfiles(table_name)
+      @admin.refreshHFiles(org.apache.hadoop.hbase.TableName.valueOf(table_name))
+    end
+
+    #----------------------------------------------------------------------------------------------
     # Requests region normalization for all configured tables in the cluster
     # Returns true if normalizer ran successfully
     def normalize()
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 99adf73..9370473 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -344,6 +344,7 @@ Shell.load_command_group(
     trace
     splitormerge_switch
     splitormerge_enabled
+    refresh_hfiles
   ],
   # TODO remove older hlog_roll command
   :aliases => {
diff --git a/hbase-shell/src/main/ruby/shell/commands/refresh_hfiles.rb b/hbase-shell/src/main/ruby/shell/commands/refresh_hfiles.rb
new file mode 100644
index 0000000..a361fde
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/refresh_hfiles.rb
@@ -0,0 +1,38 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class RefreshHfiles < Command
+      def help
+        return <<-EOF
+Refresh HFiles for the table.
+For example:
+
+  hbase> refresh_hfiles 'TABLENAME'
+
+EOF
+      end
+
+      def command(table_name)
+        format_simple_command do
+          admin.refresh_hfiles(table_name)
+        end
+      end
+    end
+  end
+end
-- 
2.10.1 (Apple Git-78)