diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java index df0ae8e..c02ec18 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java @@ -78,7 +78,7 @@ } public MultiByteBuff(Recycler recycler, ByteBuffer... items) { - this(new RefCnt(recycler), items); + this(RefCnt.create(recycler), items); } MultiByteBuff(RefCnt refCnt, ByteBuffer... items) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/RefCnt.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/RefCnt.java index 018c8b4..b880658 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/RefCnt.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/RefCnt.java @@ -22,6 +22,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.io.netty.util.AbstractReferenceCounted; +import org.apache.hbase.thirdparty.io.netty.util.Recycler.Handle; import org.apache.hbase.thirdparty.io.netty.util.ReferenceCounted; /** @@ -32,6 +33,19 @@ public class RefCnt extends AbstractReferenceCounted { private Recycler recycler = ByteBuffAllocator.NONE; + private final Handle<RefCnt> handle; + + private static final org.apache.hbase.thirdparty.io.netty.util.Recycler<RefCnt> RECYCLER = + new org.apache.hbase.thirdparty.io.netty.util.Recycler<RefCnt>() { + @Override + protected RefCnt newObject(Handle<RefCnt> handle) { + return new RefCnt(handle); + } + }; + + private RefCnt(Handle<RefCnt> handle) { + this.handle = handle; + } /** * Create an {@link RefCnt} with an initial reference count = 1. If the reference count become * needed to track on heap ByteBuff. 
*/ public static RefCnt create() { - return new RefCnt(ByteBuffAllocator.NONE); + return create(ByteBuffAllocator.NONE); } public static RefCnt create(Recycler recycler) { - return new RefCnt(recycler); - } - - public RefCnt(Recycler recycler) { - this.recycler = recycler; + RefCnt entry = RECYCLER.get(); + entry.setRefCnt(1); + entry.recycler = recycler; + return entry; } @Override protected final void deallocate() { this.recycler.free(); + handle.recycle(this); } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java index 797bfdc..f72695e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java @@ -55,7 +55,7 @@ } public SingleByteBuff(Recycler recycler, ByteBuffer buf) { - this(new RefCnt(recycler), buf); + this(RefCnt.create(recycler), buf); } SingleByteBuff(RefCnt refCnt, ByteBuffer buf) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 99abfea..13d2bae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -588,6 +588,7 @@ if (backingMap.remove(cacheKey, be)) { blockEvicted(cacheKey, be, !existed); cacheStats.evicted(be.getCachedTime(), cacheKey.isPrimary()); + be.recycle(); } return null; }); @@ -1357,8 +1358,8 @@ boolean succ = false; BucketEntry bucketEntry = null; try { - bucketEntry = new BucketEntry(offset, len, accessCounter, inMemory, RefCnt.create(recycler), - getByteBuffAllocator()); + bucketEntry = BucketEntry.newInstance(offset, len, accessCounter, inMemory, + RefCnt.create(recycler), getByteBuffAllocator()); 
bucketEntry.setDeserializerReference(data.getDeserializer()); if (data instanceof HFileBlock) { // If an instance of HFileBlock, save on some allocations. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java index 2dd7775..2f9a992 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java @@ -37,6 +37,10 @@ import org.apache.hadoop.hbase.util.IdReadWriteLock; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.io.netty.util.Recycler; +import org.apache.hbase.thirdparty.io.netty.util.Recycler.Handle; + /** * Item in cache. We expect this to be where most memory goes. Java uses 8 bytes just for object * headers; after this, we want to use as little as possible - so we only use 8 bytes, but in order @@ -78,28 +82,65 @@ * 4. The read RPC patch shipped the response, and release the block. then refCnt--;
* Once the refCnt decrease to zero, then the {@link BucketAllocator} will free the block area. */ - private final RefCnt refCnt; - final AtomicBoolean markedAsEvicted; - final ByteBuffAllocator allocator; + private RefCnt refCnt; + AtomicBoolean markedAsEvicted; + ByteBuffAllocator allocator; + private final Handle<BucketEntry> handle; /** * Time this block was cached. Presumes we are created just before we are added to the cache. */ - private final long cachedTime = System.nanoTime(); + private long cachedTime = System.nanoTime(); - BucketEntry(long offset, int length, long accessCounter, boolean inMemory) { - this(offset, length, accessCounter, inMemory, RefCnt.create(), ByteBuffAllocator.HEAP); + private static final Recycler<BucketEntry> RECYCLER = new Recycler<BucketEntry>() { + @Override + protected BucketEntry newObject(Handle<BucketEntry> handle) { + return new BucketEntry(handle); + } + }; + + public void recycle() { + if (handle != null) { + handle.recycle(this); + } } - BucketEntry(long offset, int length, long accessCounter, boolean inMemory, RefCnt refCnt, - ByteBuffAllocator allocator) { - setOffset(offset); + public static BucketEntry newInstance(long offset, int length, long accessCounter, + boolean inMemory) { + return newInstance(offset, length, accessCounter, inMemory, RefCnt.create(), + ByteBuffAllocator.HEAP); + } + + public static BucketEntry newInstance(long offset, int length, long accessCounter, + boolean inMemory, RefCnt refCnt, ByteBuffAllocator allocator) { + BucketEntry entry = RECYCLER.get(); + entry.setOffset(offset); + entry.length = length; + entry.accessCounter = accessCounter; + entry.priority = inMemory ? 
BlockPriority.MEMORY : BlockPriority.MULTI; + entry.refCnt = refCnt; + entry.markedAsEvicted.set(false); + entry.allocator = allocator; + entry.cachedTime = System.nanoTime(); + entry.deserializerIndex = 0; + return entry; + } + + private BucketEntry(Handle<BucketEntry> handle) { + this.handle = handle; + this.markedAsEvicted = new AtomicBoolean(false); + } + + @VisibleForTesting + public BucketEntry(long offset, int length, long accessCounter, boolean inMemory) { + this.handle = null; + this.setOffset(offset); this.length = length; this.accessCounter = accessCounter; this.priority = inMemory ? BlockPriority.MEMORY : BlockPriority.MULTI; - this.refCnt = refCnt; + this.refCnt = RefCnt.create(); this.markedAsEvicted = new AtomicBoolean(false); - this.allocator = allocator; + this.allocator = ByteBuffAllocator.HEAP; } long offset() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java index 69b8370..391c595 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java @@ -133,7 +133,7 @@ BlockCacheKey key = new BlockCacheKey(protoKey.getHfilename(), protoKey.getOffset(), protoKey.getPrimaryReplicaBlock(), fromPb(protoKey.getBlockType())); BucketCacheProtos.BucketEntry protoValue = entry.getValue(); - BucketEntry value = new BucketEntry( + BucketEntry value = BucketEntry.newInstance( protoValue.getOffset(), protoValue.getLength(), protoValue.getAccessCounter(),