diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index cb23ca9..3e9c376 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.IdReadWriteLock;
+import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
@@ -185,9 +186,11 @@ public class BucketCache implements BlockCache, HeapSize {
/**
* A ReentrantReadWriteLock to lock on a particular block identified by offset.
* The purpose of this is to avoid freeing a block while it is being read.
+ *
+ * The set of block offsets in BucketCache is bounded and the locks are reused frequently, so
+ * soft references are the better choice here.
*/
@VisibleForTesting
- final IdReadWriteLock offsetLock = new IdReadWriteLock();
+ final IdReadWriteLock offsetLock = new IdReadWriteLock(ReferenceType.SOFT);
private final NavigableSet<BlockCacheKey> blocksByHFile =
new ConcurrentSkipListSet<>(new Comparator<BlockCacheKey>() {
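
The SOFT choice above matters because BucketCache takes these per-offset locks on every block read and free. A minimal sketch of the pattern the lock enables (not the actual BucketCache code; readBlockAt and freeBlockAt are hypothetical helpers):

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.hbase.util.IdReadWriteLock;
import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;

public class OffsetLockSketch {
  private final IdReadWriteLock offsetLock = new IdReadWriteLock(ReferenceType.SOFT);

  // Readers hold the read lock for the block's offset, so a concurrent
  // evictor cannot free the block mid-read.
  byte[] read(long offset) {
    ReentrantReadWriteLock lock = offsetLock.getLock(offset);
    lock.readLock().lock();
    try {
      return readBlockAt(offset); // hypothetical helper
    } finally {
      lock.readLock().unlock();
    }
  }

  // The evictor takes the write lock, which blocks until in-flight reads
  // of the same offset have released their read locks.
  void evict(long offset) {
    ReentrantReadWriteLock lock = offsetLock.getLock(offset);
    lock.writeLock().lock();
    try {
      freeBlockAt(offset); // hypothetical helper
    } finally {
      lock.writeLock().unlock();
    }
  }

  private byte[] readBlockAt(long offset) { return new byte[0]; }
  private void freeBlockAt(long offset) { }
}
```

Because the same bounded set of offsets is locked over and over, soft references keep the pooled locks alive between uses instead of letting every GC cycle discard and recreate them.
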
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
index deb2265..2a83029 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
@@ -18,6 +18,7 @@
*/
package org.apache.hadoop.hbase.util;
+import java.lang.ref.Reference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -44,16 +45,48 @@ import com.google.common.annotations.VisibleForTesting;
public class IdReadWriteLock {
// The number of locks we want to easily support. It's not a maximum.
private static final int NB_CONCURRENT_LOCKS = 1000;
- // The pool to get entry from, entries are mapped by soft reference and will be
- // automatically garbage-collected when JVM memory pressure is high
- private final ObjectPool<Long, ReentrantReadWriteLock> lockPool =
- new SoftObjectPool<>(
- new ObjectPool.ObjectFactory<Long, ReentrantReadWriteLock>() {
- @Override
- public ReentrantReadWriteLock createObject(Long id) {
- return new ReentrantReadWriteLock();
- }
- }, NB_CONCURRENT_LOCKS);
+ /**
+ * The pool to get entries from; entries are held via {@link Reference} and are
+ * automatically garbage-collected by the JVM
+ */
+ private final ObjectPool<Long, ReentrantReadWriteLock> lockPool;
+ private final ReferenceType refType;
+
+ public IdReadWriteLock() {
+ this(ReferenceType.WEAK);
+ }
+
+ /**
+ * Constructor of IdReadWriteLock
+ * @param referenceType type of the reference used in the lock pool; {@link ReferenceType#WEAK}
+ * by default. Use {@link ReferenceType#SOFT} if the key set is bounded and the locks
+ * are reused frequently
+ */
+ public IdReadWriteLock(ReferenceType referenceType) {
+ this.refType = referenceType;
+ switch (referenceType) {
+ case SOFT:
lockPool = new SoftObjectPool<>(new ObjectPool.ObjectFactory<Long, ReentrantReadWriteLock>() {
+ @Override
+ public ReentrantReadWriteLock createObject(Long id) {
+ return new ReentrantReadWriteLock();
+ }
+ }, NB_CONCURRENT_LOCKS);
+ break;
+ case WEAK:
+ default:
lockPool = new WeakObjectPool<>(new ObjectPool.ObjectFactory<Long, ReentrantReadWriteLock>() {
+ @Override
+ public ReentrantReadWriteLock createObject(Long id) {
+ return new ReentrantReadWriteLock();
+ }
+ }, NB_CONCURRENT_LOCKS);
+ }
+ }
+
+ public enum ReferenceType {
+ WEAK, SOFT
+ }
/**
* Get the ReentrantReadWriteLock corresponding to the given id
@@ -93,4 +126,9 @@ public class IdReadWriteLock {
Thread.sleep(50);
}
}
+
+ @VisibleForTesting
+ public ReferenceType getReferenceType() {
+ return this.refType;
+ }
}
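
The behavioral difference between the two ReferenceType values comes straight from the JDK's reference semantics: a WeakReference is cleared at the next GC once no strong reference remains, while a SoftReference survives until the JVM is under memory pressure. A standalone illustration (plain JDK, independent of the HBase classes; System.gc() is only a hint, so the output is JVM-dependent):

```java
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;

public class ReferenceDemo {
  public static void main(String[] args) {
    WeakReference<Object> weak = new WeakReference<>(new Object());
    SoftReference<Object> soft = new SoftReference<>(new Object());

    // After this GC hint, the weakly referenced object is typically
    // collected, while the softly referenced one is typically retained.
    System.gc();

    System.out.println("weak: " + weak.get()); // usually null
    System.out.println("soft: " + soft.get()); // usually still a live object
  }
}
```

Hence the guidance in the constructor javadoc: SOFT suits a small, hot key set (like BucketCache offsets) where recreating locks after every GC would be wasteful, while WEAK remains the safer default for unbounded key sets.
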
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
index dee36e8..5a29731 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
@@ -27,7 +27,6 @@ import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.locks.Lock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -36,7 +35,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
// imports for classes still in regionserver.wal
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.IdReadWriteLock;
+import org.apache.hadoop.hbase.util.IdLock;
/**
* A WAL Provider that returns a WAL per group of regions.
@@ -132,7 +131,7 @@ public class RegionGroupingProvider implements WALProvider {
/** A group-to-provider mapping; ensure a one-to-one rather than many-to-one mapping */
private final ConcurrentMap<String, WALProvider> cached = new ConcurrentHashMap<>();
- private final IdReadWriteLock createLock = new IdReadWriteLock();
+ private final IdLock createLock = new IdLock();
private RegionGroupingStrategy strategy = null;
private WALFactory factory = null;
@@ -181,16 +180,18 @@ public class RegionGroupingProvider implements WALProvider {
private WAL getWAL(final String group) throws IOException {
WALProvider provider = cached.get(group);
if (provider == null) {
- Lock lock = createLock.getLock(group.hashCode()).writeLock();
- lock.lock();
+ IdLock.Entry lockEntry = null;
try {
+ lockEntry = createLock.getLockEntry(group.hashCode());
provider = cached.get(group);
if (provider == null) {
provider = createProvider(group);
cached.put(group, provider);
}
} finally {
- lock.unlock();
+ if (lockEntry != null) {
+ createLock.releaseLockEntry(lockEntry);
+ }
}
}
return provider.getWAL(null, null);
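
The switch from a pooled read/write lock to IdLock fits this call site: getWAL only needs exclusive, short-lived mutual exclusion while a provider is created, and IdLock holds an entry strongly for exactly as long as a thread owns it. A minimal get-or-create sketch of the same pattern (the class and factory callback are hypothetical; getLockEntry and releaseLockEntry are the IdLock methods used above):

```java
import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import org.apache.hadoop.hbase.util.IdLock;

public class GetOrCreateSketch<V> {
  private final ConcurrentMap<String, V> cached = new ConcurrentHashMap<>();
  private final IdLock createLock = new IdLock();

  public V getOrCreate(String key, Function<String, V> factory) throws IOException {
    V value = cached.get(key);
    if (value == null) {
      IdLock.Entry lockEntry = null;
      try {
        lockEntry = createLock.getLockEntry(key.hashCode());
        // Re-check under the lock: another thread may have won the race.
        value = cached.get(key);
        if (value == null) {
          value = factory.apply(key);
          cached.put(key, value);
        }
      } finally {
        if (lockEntry != null) {
          createLock.releaseLockEntry(lockEntry);
        }
      }
    }
    return value;
  }
}
```

Note that keying the lock by key.hashCode() means two distinct keys can collide on one lock entry; that only serializes their creation rather than producing a wrong result, because the map itself is still keyed by the full string.
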
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java
index 295816f..7dd2a63 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.util;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
+import java.util.Arrays;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.Callable;
@@ -38,9 +39,13 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+@RunWith(Parameterized.class)
@Category({MiscTests.class, MediumTests.class})
// Medium as it creates 100 threads; seems better to run it isolated
public class TestIdReadWriteLock {
@@ -51,7 +56,14 @@ public class TestIdReadWriteLock {
private static final int NUM_THREADS = 128;
private static final int NUM_SECONDS = 15;
- private IdReadWriteLock idLock = new IdReadWriteLock();
+ @Parameterized.Parameter
+ public IdReadWriteLock idLock;
+
+ @Parameterized.Parameters
+ public static Iterable<Object[]> data() {
+ return Arrays.asList(new Object[] { new IdReadWriteLock(ReferenceType.WEAK) },
+ new Object[] { new IdReadWriteLock(ReferenceType.SOFT) });
+ }