--- .../java/org/apache/hadoop/hbase/client/Admin.java | 8 + .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 44 + .../hadoop/hbase/protobuf/RequestConverter.java | 12 + .../hbase/protobuf/generated/AdminProtos.java | 1074 +++++++++++++++++++- hbase-protocol/src/main/protobuf/Admin.proto | 10 + .../hadoop/hbase/regionserver/HRegionServer.java | 10 + .../hadoop/hbase/regionserver/RSRpcServices.java | 14 + .../apache/hadoop/hbase/HBaseTestingUtility.java | 19 + .../hadoop/hbase/master/MockRegionServer.java | 9 + .../regionserver/TestClearRegionBlockCache.java | 132 +++ .../replication/regionserver/TestReplicator.java | 6 + hbase-shell/src/main/ruby/hbase/admin.rb | 6 + hbase-shell/src/main/ruby/shell.rb | 1 + .../main/ruby/shell/commands/clear_block_cache.rb | 38 + 14 files changed, 1345 insertions(+), 38 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java create mode 100644 hbase-shell/src/main/ruby/shell/commands/clear_block_cache.rb diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index fa9594a..3ee7b4e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -724,6 +724,14 @@ public interface Admin extends Abortable, Closeable { boolean isBalancerEnabled() throws IOException; /** + * Clear all the blocks corresponding to this table from block cache. + * + * @param tableName table to clear block cache + * @throws IOException if a remote or network exception occurs + */ + void clearBlockCache(final TableName tableName) throws IOException; + + /** * Invoke region normalizer. Can NOT run for various reasons. Check logs. * * @return True if region normalizer ran, false otherwise. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 84871ed..0fd2783 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; @@ -2436,6 +2437,49 @@ public class HBaseAdmin implements Admin { } /** + * {@inheritDoc} + */ + @Override + public void clearBlockCache(final TableName tableName) throws IOException { + ZooKeeperWatcher zookeeper = null; + try { + checkTableExists(tableName); + zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(), + new ThrowableAbortable()); + List> pairs = + MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName); + for (Pair pair: pairs) { + if (pair.getFirst().isOffline()) continue; + if (pair.getSecond() == null) continue; + try { + clearBlockCache(pair.getSecond(), pair.getFirst()); + } catch (NotServingRegionException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Trying to clear block cache for " + pair.getFirst() + ": " + + StringUtils.stringifyException(e)); + } + } + } + } finally { + if (zookeeper != null) { + zookeeper.close(); + } + } + } + + private void clearBlockCache(final ServerName sn, final HRegionInfo hri) throws IOException { + HBaseRpcController 
controller = rpcControllerFactory.newController(); + AdminService.BlockingInterface admin = this.connection.getAdmin(sn); + ClearRegionBlockCacheRequest request = + RequestConverter.buildClearRegionBlockCacheRequest(hri.getRegionName()); + try { + admin.clearRegionBlockCache(controller, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + /** * Invoke region normalizer. Can NOT run for various reasons. Check logs. * * @return True if region normalizer ran, false otherwise. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 32bdcb9..eff4a6f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; @@ -1440,6 +1441,17 @@ public final class RequestConverter { } /** + * Creates a protocol buffer ClearRegionBlockCacheRequest + * + * @return a ClearRegionBlockCacheRequest + */ + public static ClearRegionBlockCacheRequest buildClearRegionBlockCacheRequest(final byte[] + regionName) { + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); + return ClearRegionBlockCacheRequest.newBuilder().setRegion(region).build(); + } + + /** * @see {@link 
#buildGetClusterStatusRequest} */ private static final GetClusterStatusRequest GET_CLUSTER_STATUS_REQUEST = diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java index 1c59ea6..6fbbf56 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java @@ -22420,6 +22420,905 @@ public final class AdminProtos { // @@protoc_insertion_point(class_scope:hbase.pb.UpdateConfigurationResponse) } + public interface ClearRegionBlockCacheRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.RegionSpecifier region = 1; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + boolean hasRegion(); + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(); + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.ClearRegionBlockCacheRequest} + */ + public static final class ClearRegionBlockCacheRequest extends + com.google.protobuf.GeneratedMessage + implements ClearRegionBlockCacheRequestOrBuilder { + // Use ClearRegionBlockCacheRequest.newBuilder() to construct. 
+ private ClearRegionBlockCacheRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ClearRegionBlockCacheRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ClearRegionBlockCacheRequest defaultInstance; + public static ClearRegionBlockCacheRequest getDefaultInstance() { + return defaultInstance; + } + + public ClearRegionBlockCacheRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClearRegionBlockCacheRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = region_.toBuilder(); + } + region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(region_); + region_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch 
(java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearRegionBlockCacheRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearRegionBlockCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ClearRegionBlockCacheRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ClearRegionBlockCacheRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.RegionSpecifier region = 1; + public static final int REGION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + return region_; + } + /** + * 
required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + return region_; + } + + private void initFields() { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegion()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, region_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, region_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest other = 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest) obj; + + boolean result = true; + result = result && (hasRegion() == other.hasRegion()); + if (hasRegion()) { + result = result && getRegion() + .equals(other.getRegion()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegion()) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ClearRegionBlockCacheRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearRegionBlockCacheRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearRegionBlockCacheRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + 
} + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearRegionBlockCacheRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionBuilder_ == null) { + result.region_ = region_; + } else { + result.region_ = regionBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest.getDefaultInstance()) return this; + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegion()) { + + return false; + } + if (!getRegion().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.RegionSpecifier region = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + if (regionBuilder_ == null) { + return 
region_; + } else { + return regionBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + region_ = value; + onChanged(); + } else { + regionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder setRegion( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + if (regionBuilder_ == null) { + region_ = builderForValue.build(); + onChanged(); + } else { + regionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { + region_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial(); + } else { + region_ = value; + } + onChanged(); + } else { + regionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + onChanged(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder 
getRegionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilder(); + } else { + return region_; + } + } + /** + * required .hbase.pb.RegionSpecifier region = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + region_, + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ClearRegionBlockCacheRequest) + } + + static { + defaultInstance = new ClearRegionBlockCacheRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ClearRegionBlockCacheRequest) + } + + public interface ClearRegionBlockCacheResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.ClearRegionBlockCacheResponse} + */ + public static final class ClearRegionBlockCacheResponse extends + com.google.protobuf.GeneratedMessage + implements ClearRegionBlockCacheResponseOrBuilder { + // Use ClearRegionBlockCacheResponse.newBuilder() to construct. 
+ private ClearRegionBlockCacheResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ClearRegionBlockCacheResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ClearRegionBlockCacheResponse defaultInstance; + public static ClearRegionBlockCacheResponse getDefaultInstance() { + return defaultInstance; + } + + public ClearRegionBlockCacheResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ClearRegionBlockCacheResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearRegionBlockCacheResponse_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearRegionBlockCacheResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ClearRegionBlockCacheResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ClearRegionBlockCacheResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj 
instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new 
Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ClearRegionBlockCacheResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearRegionBlockCacheResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearRegionBlockCacheResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_hbase_pb_ClearRegionBlockCacheResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse 
getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ClearRegionBlockCacheResponse) + } + + static { + defaultInstance = new ClearRegionBlockCacheResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ClearRegionBlockCacheResponse) + } + /** * Protobuf service {@code hbase.pb.AdminService} */ @@ -22564,6 +23463,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc ClearRegionBlockCache(.hbase.pb.ClearRegionBlockCacheRequest) returns (.hbase.pb.ClearRegionBlockCacheResponse); + */ + public abstract void clearRegionBlockCache( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -22705,6 +23612,14 @@ public final class AdminProtos { impl.updateConfiguration(controller, request, done); } + @java.lang.Override + public void clearRegionBlockCache( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest request, + com.google.protobuf.RpcCallback done) { + impl.clearRegionBlockCache(controller, request, done); + } + }; } @@ -22761,6 +23676,8 @@ public final class AdminProtos { return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); case 16: return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request); + case 17: + return 
impl.clearRegionBlockCache(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -22809,6 +23726,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -22857,6 +23776,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23001,6 +23922,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc ClearRegionBlockCache(.hbase.pb.ClearRegionBlockCacheRequest) returns (.hbase.pb.ClearRegionBlockCacheResponse); + */ + public abstract void clearRegionBlockCache( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -23108,6 +24037,11 @@ public final class AdminProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 17: + this.clearRegionBlockCache(controller, 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -23156,6 +24090,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23204,6 +24140,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -23479,6 +24417,21 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance())); } + + public void clearRegionBlockCache( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(17), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -23571,6 +24524,11 @@ public final class AdminProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse clearRegionBlockCache( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -23783,6 +24741,18 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse clearRegionBlockCache( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(17), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.AdminService) @@ -23968,6 +24938,16 @@ public final class AdminProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_UpdateConfigurationResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + 
internal_static_hbase_pb_ClearRegionBlockCacheRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ClearRegionBlockCacheRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ClearRegionBlockCacheResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ClearRegionBlockCacheResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -24051,44 +25031,50 @@ public final class AdminProtos { "ui_port\030\002 \001(\r\"B\n\025GetServerInfoResponse\022)" + "\n\013server_info\030\001 \002(\0132\024.hbase.pb.ServerInf" + "o\"\034\n\032UpdateConfigurationRequest\"\035\n\033Updat" + - "eConfigurationResponse2\207\013\n\014AdminService\022" + - "P\n\rGetRegionInfo\022\036.hbase.pb.GetRegionInf" + - "oRequest\032\037.hbase.pb.GetRegionInfoRespons" + - "e\022M\n\014GetStoreFile\022\035.hbase.pb.GetStoreFil" + - "eRequest\032\036.hbase.pb.GetStoreFileResponse" + - "\022V\n\017GetOnlineRegion\022 .hbase.pb.GetOnline", - "RegionRequest\032!.hbase.pb.GetOnlineRegion" + - "Response\022G\n\nOpenRegion\022\033.hbase.pb.OpenRe" + - "gionRequest\032\034.hbase.pb.OpenRegionRespons" + - "e\022M\n\014WarmupRegion\022\035.hbase.pb.WarmupRegio" + - "nRequest\032\036.hbase.pb.WarmupRegionResponse" + - "\022J\n\013CloseRegion\022\034.hbase.pb.CloseRegionRe" + - "quest\032\035.hbase.pb.CloseRegionResponse\022J\n\013" + - "FlushRegion\022\034.hbase.pb.FlushRegionReques" + - "t\032\035.hbase.pb.FlushRegionResponse\022J\n\013Spli" + - "tRegion\022\034.hbase.pb.SplitRegionRequest\032\035.", - "hbase.pb.SplitRegionResponse\022P\n\rCompactR" + - "egion\022\036.hbase.pb.CompactRegionRequest\032\037." + - "hbase.pb.CompactRegionResponse\022M\n\014MergeR" + - "egions\022\035.hbase.pb.MergeRegionsRequest\032\036." 
+ - "hbase.pb.MergeRegionsResponse\022\\\n\021Replica" + - "teWALEntry\022\".hbase.pb.ReplicateWALEntryR" + - "equest\032#.hbase.pb.ReplicateWALEntryRespo" + - "nse\022Q\n\006Replay\022\".hbase.pb.ReplicateWALEnt" + - "ryRequest\032#.hbase.pb.ReplicateWALEntryRe" + - "sponse\022P\n\rRollWALWriter\022\036.hbase.pb.RollW", - "ALWriterRequest\032\037.hbase.pb.RollWALWriter" + - "Response\022P\n\rGetServerInfo\022\036.hbase.pb.Get" + - "ServerInfoRequest\032\037.hbase.pb.GetServerIn" + - "foResponse\022G\n\nStopServer\022\033.hbase.pb.Stop" + - "ServerRequest\032\034.hbase.pb.StopServerRespo" + - "nse\022_\n\022UpdateFavoredNodes\022#.hbase.pb.Upd" + - "ateFavoredNodesRequest\032$.hbase.pb.Update" + - "FavoredNodesResponse\022b\n\023UpdateConfigurat" + - "ion\022$.hbase.pb.UpdateConfigurationReques" + - "t\032%.hbase.pb.UpdateConfigurationResponse", - "BA\n*org.apache.hadoop.hbase.protobuf.gen" + - "eratedB\013AdminProtosH\001\210\001\001\240\001\001" + "eConfigurationResponse\"I\n\034ClearRegionBlo" + + "ckCacheRequest\022)\n\006region\030\001 \002(\0132\031.hbase.p" + + "b.RegionSpecifier\"\037\n\035ClearRegionBlockCac" + + "heResponse2\361\013\n\014AdminService\022P\n\rGetRegion" + + "Info\022\036.hbase.pb.GetRegionInfoRequest\032\037.h" + + "base.pb.GetRegionInfoResponse\022M\n\014GetStor", + "eFile\022\035.hbase.pb.GetStoreFileRequest\032\036.h" + + "base.pb.GetStoreFileResponse\022V\n\017GetOnlin" + + "eRegion\022 .hbase.pb.GetOnlineRegionReques" + + "t\032!.hbase.pb.GetOnlineRegionResponse\022G\n\n" + + "OpenRegion\022\033.hbase.pb.OpenRegionRequest\032" + + "\034.hbase.pb.OpenRegionResponse\022M\n\014WarmupR" + + "egion\022\035.hbase.pb.WarmupRegionRequest\032\036.h" + + "base.pb.WarmupRegionResponse\022J\n\013CloseReg" + + "ion\022\034.hbase.pb.CloseRegionRequest\032\035.hbas" + + "e.pb.CloseRegionResponse\022J\n\013FlushRegion\022", + "\034.hbase.pb.FlushRegionRequest\032\035.hbase.pb" + + ".FlushRegionResponse\022J\n\013SplitRegion\022\034.hb" + + 
"ase.pb.SplitRegionRequest\032\035.hbase.pb.Spl" + + "itRegionResponse\022P\n\rCompactRegion\022\036.hbas" + + "e.pb.CompactRegionRequest\032\037.hbase.pb.Com" + + "pactRegionResponse\022M\n\014MergeRegions\022\035.hba" + + "se.pb.MergeRegionsRequest\032\036.hbase.pb.Mer" + + "geRegionsResponse\022\\\n\021ReplicateWALEntry\022\"" + + ".hbase.pb.ReplicateWALEntryRequest\032#.hba" + + "se.pb.ReplicateWALEntryResponse\022Q\n\006Repla", + "y\022\".hbase.pb.ReplicateWALEntryRequest\032#." + + "hbase.pb.ReplicateWALEntryResponse\022P\n\rRo" + + "llWALWriter\022\036.hbase.pb.RollWALWriterRequ" + + "est\032\037.hbase.pb.RollWALWriterResponse\022P\n\r" + + "GetServerInfo\022\036.hbase.pb.GetServerInfoRe" + + "quest\032\037.hbase.pb.GetServerInfoResponse\022G" + + "\n\nStopServer\022\033.hbase.pb.StopServerReques" + + "t\032\034.hbase.pb.StopServerResponse\022_\n\022Updat" + + "eFavoredNodes\022#.hbase.pb.UpdateFavoredNo" + + "desRequest\032$.hbase.pb.UpdateFavoredNodes", + "Response\022b\n\023UpdateConfiguration\022$.hbase." 
+ + "pb.UpdateConfigurationRequest\032%.hbase.pb" + + ".UpdateConfigurationResponse\022h\n\025ClearReg" + + "ionBlockCache\022&.hbase.pb.ClearRegionBloc" + + "kCacheRequest\032\'.hbase.pb.ClearRegionBloc" + + "kCacheResponseBA\n*org.apache.hadoop.hbas" + + "e.protobuf.generatedB\013AdminProtosH\001\210\001\001\240\001" + + "\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -24311,6 +25297,18 @@ public final class AdminProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_UpdateConfigurationResponse_descriptor, new java.lang.String[] { }); + internal_static_hbase_pb_ClearRegionBlockCacheRequest_descriptor = + getDescriptor().getMessageTypes().get(34); + internal_static_hbase_pb_ClearRegionBlockCacheRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ClearRegionBlockCacheRequest_descriptor, + new java.lang.String[] { "Region", }); + internal_static_hbase_pb_ClearRegionBlockCacheResponse_descriptor = + getDescriptor().getMessageTypes().get(35); + internal_static_hbase_pb_ClearRegionBlockCacheResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ClearRegionBlockCacheResponse_descriptor, + new java.lang.String[] { }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto index a1905a4..19881e9 100644 --- a/hbase-protocol/src/main/protobuf/Admin.proto +++ b/hbase-protocol/src/main/protobuf/Admin.proto @@ -256,6 +256,13 @@ message UpdateConfigurationRequest { message UpdateConfigurationResponse { } +message ClearRegionBlockCacheRequest { + required RegionSpecifier region = 1; +} + +message ClearRegionBlockCacheResponse { +} + service AdminService { rpc GetRegionInfo(GetRegionInfoRequest) 
returns(GetRegionInfoResponse); @@ -307,4 +314,7 @@ service AdminService { rpc UpdateConfiguration(UpdateConfigurationRequest) returns(UpdateConfigurationResponse); + + rpc ClearRegionBlockCache(ClearRegionBlockCacheRequest) + returns(ClearRegionBlockCacheResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 32bab6b..a915fbb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -3478,6 +3478,16 @@ public class HRegionServer extends HasThread implements configurationManager.notifyAllObservers(conf); } + public void clearRegionBlockCache(Region region) { + CacheConfig cacheConfig = this.getCacheConfig(); + + for(Store store : region.getStores()) { + for(StoreFile hFile : store.getStorefiles()) { + cacheConfig.getBlockCache().evictBlocksByHfileName(hFile.getPath().getName()); + } + } + } + @Override public HeapMemoryManager getHeapMemoryManager() { return hMemManager; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index cd73e32..37b920a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -108,6 +108,8 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest; +import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; @@ -3165,4 +3167,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } return UpdateConfigurationResponse.getDefaultInstance(); } + + @Override + public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller, + ClearRegionBlockCacheRequest request) + throws ServiceException { + try { + this.regionServer.clearRegionBlockCache(getRegion(request.getRegion())); + } catch (Exception e) { + throw new ServiceException(e); + } + return ClearRegionBlockCacheResponse.newBuilder().build(); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index f60be66..2443899 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -99,6 +99,7 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; +import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.security.HBaseKerberosUtils; @@ -4337,4 +4338,22 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath()); return kdc; } + + public int getNumHFiles(final TableName tableName, final byte[] family) { + int 
numHFiles = 0; + for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) { + numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName, + family); + } + return numHFiles; + } + + public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName, + final byte[] family) { + int numHFiles = 0; + for (Region region : rs.getOnlineRegions(tableName)) { + numHFiles += region.getStore(family).getStorefilesCount(); + } + return numHFiles; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 14bd2fd..9a8b495 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -52,6 +52,8 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; @@ -649,6 +651,13 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override + public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller, + ClearRegionBlockCacheRequest request) + throws ServiceException { + return null; + } + + @Override public HeapMemoryManager getHeapMemoryManager() { return null; } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java new file mode 100644 index 0000000..466850d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java @@ -0,0 +1,132 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.io.hfile.BlockCache; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.io.IOException; +import java.util.ArrayList; + +import static org.junit.Assert.assertEquals; + +@Category(MediumTests.class) +@RunWith(Parameterized.class) +public class TestClearRegionBlockCache { + private static final TableName TABLE_NAME = TableName.valueOf("testClearRegionBlockCache"); + private static final byte[] FAMILY = Bytes.toBytes("family"); + private static final byte[][] SPLIT_KEY = new byte[][] { Bytes.toBytes("5") }; + private static final int NUM_MASTERS = 1; + private static final int NUM_RS = 2; + + private final HBaseTestingUtility htu = new HBaseTestingUtility(); + + private HTable table; + private HRegionServer rs1, rs2; + private MiniHBaseCluster cluster; + private Admin hbaseAdmin; + + @Parameterized.Parameter public String cacheType; + + @Parameterized.Parameters(name = "{index}: {0}") + public static Object[] data() { + return new Object[] { "lru", "bucket" }; + } + + @Before + public void setup() throws Exception { + if (cacheType.equals("bucket")) { + htu.getConfiguration().set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap"); + 
htu.getConfiguration().setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 30); + } + + cluster = htu.startMiniCluster(NUM_MASTERS, NUM_RS); + rs1 = cluster.getRegionServer(0); + rs2 = cluster.getRegionServer(1); + + // Create table + HTableDescriptor desc = new HTableDescriptor(TABLE_NAME); + desc.addFamily(new HColumnDescriptor(FAMILY)); + hbaseAdmin = cluster.getMaster().getConnection().getAdmin(); + hbaseAdmin.createTable(desc, SPLIT_KEY); + table = new HTable(htu.getConfiguration(), TABLE_NAME); + } + + @After + public void teardown() throws Exception { + hbaseAdmin.close(); + htu.shutdownMiniCluster(); + } + + @Test + public void testClearBlockCache() throws Exception { + htu.loadNumericRows(table, FAMILY, 1, 10); + htu.flush(TABLE_NAME); + + BlockCache blockCache1 = rs1.getCacheConfig().getBlockCache(); + BlockCache blockCache2 = rs2.getCacheConfig().getBlockCache(); + + long initialBlockCount1 = blockCache1.getBlockCount(); + long initialBlockCount2 = blockCache2.getBlockCount(); + + // scan will cause blocks to be added in BlockCache + scanAllRegionsForRS(rs1); + assertEquals(htu.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY), + blockCache1.getBlockCount() - initialBlockCount1); + clearRegionBlockCache(rs1); + + scanAllRegionsForRS(rs2); + assertEquals(htu.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY), + blockCache2.getBlockCount() - initialBlockCount2); + clearRegionBlockCache(rs2); + + assertEquals(initialBlockCount1, blockCache1.getBlockCount()); + assertEquals(initialBlockCount2, blockCache2.getBlockCount()); + } + + private void scanAllRegionsForRS(HRegionServer rs) throws IOException { + for (Region region : rs.getOnlineRegions(TABLE_NAME)) { + RegionScanner scanner = region.getScanner(new Scan()); + while (scanner.next(new ArrayList<Cell>())); + } + } + + private void clearRegionBlockCache(HRegionServer rs) { + for (Region region : rs.getOnlineRegions(TABLE_NAME)) { + rs.clearRegionBlockCache(region); + } + } +} diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java index 6d15a1b..19509fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java @@ -382,6 +382,12 @@ public class TestReplicator extends TestReplicationBase { UpdateConfigurationRequest request) throws ServiceException { return delegate.updateConfiguration(controller, request); } + + @Override + public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller, + ClearRegionBlockCacheRequest request) throws ServiceException { + return delegate.clearRegionBlockCache(controller, request); + } } public class FailureInjectingReplicatorForTest extends ReplicatorForTest { diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 502abe2..fdc01cf 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -170,6 +170,12 @@ module Hbase end #---------------------------------------------------------------------------------------------- + # Requests clear block cache for table + def clear_block_cache(table_name) + @admin.clearBlockCache(org.apache.hadoop.hbase.TableName.valueOf(table_name)) + end + + #---------------------------------------------------------------------------------------------- # Requests region normalization for all configured tables in the cluster # Returns true if normalizer ran successfully def normalize() diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 99adf73..d73626a 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -344,6 +344,7 @@ Shell.load_command_group( trace splitormerge_switch splitormerge_enabled + 
clear_block_cache ], # TODO remove older hlog_roll command :aliases => { diff --git a/hbase-shell/src/main/ruby/shell/commands/clear_block_cache.rb b/hbase-shell/src/main/ruby/shell/commands/clear_block_cache.rb new file mode 100644 index 0000000..387dc96 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/clear_block_cache.rb @@ -0,0 +1,38 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class ClearBlockCache < Command + def help + return <<-EOF +Clear all the blocks corresponding to this table from block cache. +For example: + + hbase> clear_block_cache 'TABLENAME' + +EOF + end + def command(table_name) + format_simple_command do + admin.clear_block_cache(table_name) + end + end + end + end +end -- 2.10.1 (Apple Git-78)