From c686aedb5146348eae48557492b2c7bb327f7231 Mon Sep 17 00:00:00 2001
From: Elliott Clark
Date: Fri, 28 Aug 2015 16:13:36 -0700
Subject: [PATCH] HBASE-14334 Move Memcached block cache into its own optional module.

---
 hbase-assembly/pom.xml | 5 +
 .../src/main/assembly/hadoop-two-compat.xml | 1 +
 hbase-external-blockcache/pom.xml | 382 +++++++++++++++++++++
 .../hadoop/hbase/io/hfile/MemcachedBlockCache.java | 282 +++++++++++++++
 hbase-server/pom.xml | 6 +-
 .../apache/hadoop/hbase/io/hfile/CacheConfig.java | 16 +-
 .../hadoop/hbase/io/hfile/MemcachedBlockCache.java | 282 ---------------
 pom.xml | 6 +
 8 files changed, 691 insertions(+), 289 deletions(-)
 create mode 100644 hbase-external-blockcache/pom.xml
 create mode 100644 hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
 delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java

diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 69c4989..4851391 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -186,6 +186,11 @@
 ${project.version}
+ org.apache.hbase
+ hbase-external-blockcache
+ ${project.version}
+
+
 org.apache.hbase
 hbase-testing-util
 ${project.version}
diff --git a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
index ba28251..9ef624c 100644
--- a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
+++ b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
@@ -46,6 +46,7 @@
 org.apache.hbase:hbase-server
 org.apache.hbase:hbase-shell
 org.apache.hbase:hbase-thrift
+ org.apache.hbase:hbase-external-blockcache
diff --git a/hbase-external-blockcache/pom.xml b/hbase-external-blockcache/pom.xml
new file mode 100644
index 0000000..a46f1a5
--- /dev/null
+++ b/hbase-external-blockcache/pom.xml
@@ -0,0 +1,382 @@
+
+
+
+ 4.0.0
+
+ hbase
+ org.apache.hbase
+ 2.0.0-SNAPSHOT
+ ..
+
+ hbase-external-blockcache
+ Apache HBase - External Block Cache
+
+ HBase module that provides an out-of-process block cache.
+ Memcached is currently the reference implementation for the external block cache.
+
+ External block caches allow HBase to take advantage of other, more complex caches that can live
+ longer than the HBase regionserver process and are not necessarily tied to the lifetime of a
+ single machine. However, external block caches add extra operational overhead.
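The patch only adds the module; wiring it into a cluster is a configuration exercise. The sketch below sets the relevant keys on a Hadoop Configuration. The hbase.cache.memcached.* keys and their defaults come from MemcachedBlockCache later in this patch; the hbase.blockcache.use.external switch and the hbase.blockcache.external.class selector are assumed names for the CacheConfig constants that are referenced but not spelled out in this excerpt, so verify them against your HBase version. Note also that, with the cache moved out of hbase-server, the hbase-external-blockcache jar and spymemcached must be on the regionserver classpath for the class to load.

import org.apache.hadoop.conf.Configuration;

public class ExternalBlockCacheConfigExample {
  public static void main(String[] args) {
    // In a real deployment these live in hbase-site.xml; HBaseConfiguration.create()
    // would be used instead of a bare Configuration.
    Configuration conf = new Configuration();

    // Keys defined by MemcachedBlockCache in this patch.
    conf.set("hbase.cache.memcached.servers", "cache1:11211,cache2:11211");
    conf.setLong("hbase.cache.memcached.optimeout", 500L);  // per-operation timeout, ms
    conf.setLong("hbase.cache.memcached.timeout", 1000L);   // op-queue max block time, ms

    // Assumed key names for enabling and selecting the external cache; the real
    // constants live in CacheConfig and are not shown in this excerpt.
    conf.setBoolean("hbase.blockcache.use.external", true);
    conf.set("hbase.blockcache.external.class", "memcached");

    System.out.println("memcached servers: " + conf.get("hbase.cache.memcached.servers"));
  }
}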
+ + + + + + src/main/resources/ + + hbase-default.xml + + + + + + src/test/resources/META-INF/ + META-INF/ + + NOTICE + + true + + + + + org.apache.maven.plugins + maven-site-plugin + + true + + + + + maven-assembly-plugin + + true + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + versionInfo-source + generate-sources + + add-source + + + + ${project.build.directory}/generated-sources/java + + + + + + + maven-surefire-plugin + + + + listener + org.apache.hadoop.hbase.ResourceCheckerJUnitListener + + + + + + + secondPartTestsExecution + test + + test + + + true + + + + + + + org.apache.maven.plugins + maven-source-plugin + + true + + src/main/java + ${project.build.outputDirectory}/META-INF + + + + + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-antrun-plugin + [${maven.antrun.version}] + + run + + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + [2.8,) + + build-classpath + + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + + + + + + + + + + + + org.apache.hbase + hbase-server + + + net.spy + spymemcached + true + + + + + + + apache-release + + + + org.apache.maven.plugins + maven-resources-plugin + + + license-javadocs + prepare-package + + copy-resources + + + ${project.build.directory}/apidocs + + + src/main/javadoc/META-INF/ + META-INF/ + + NOTICE + + true + + + + + + + + + + + + skipCommonTests + + + skipCommonTests + + + + true + + + + + + hadoop-1.1 + + + + hadoop.profile1.1 + + + + + org.apache.hadoop + hadoop-core + + + + + + + hadoop-1.0 + + + hadoop.profile + 1.0 + + + + + org.apache.hadoop + hadoop-core + + + commons-io + commons-io + + + + + + + hadoop-2.0 + + + + !hadoop.profile + + + + + org.apache.hadoop + hadoop-common + + + + + + maven-dependency-plugin + + + create-mrapp-generated-classpath + generate-test-resources + + build-classpath + + + + ${project.build.directory}/test-classes/mrapp-generated-classpath + + + + + + + + + + + hadoop-3.0 + + + hadoop.profile + 3.0 + + + + 3.0-SNAPSHOT + + + + org.apache.hadoop + hadoop-common + + + + + + maven-dependency-plugin + + + create-mrapp-generated-classpath + generate-test-resources + + build-classpath + + + + ${project.build.directory}/test-classes/mrapp-generated-classpath + + + + + + + + + diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java new file mode 100644 index 0000000..f820193 --- /dev/null +++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java @@ -0,0 +1,282 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hbase.io.hfile; + +import net.spy.memcached.CachedData; +import net.spy.memcached.ConnectionFactoryBuilder; +import net.spy.memcached.FailureMode; +import net.spy.memcached.MemcachedClient; +import net.spy.memcached.transcoders.Transcoder; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType; +import org.apache.hadoop.hbase.nio.ByteBuff; +import org.apache.hadoop.hbase.nio.SingleByteBuff; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.htrace.Trace; +import org.apache.htrace.TraceScope; + + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.concurrent.ExecutionException; + +/** + * Class to store blocks into memcached. + * This should only be used on a cluster of Memcached daemons that are tuned well and have a + * good network connection to the HBase regionservers. Any other use will likely slow down HBase + * greatly. + */ +@InterfaceAudience.Private +public class MemcachedBlockCache implements BlockCache { + private static final Log LOG = LogFactory.getLog(MemcachedBlockCache.class.getName()); + + // Some memcache versions won't take more than 1024 * 1024. So set the limit below + // that just in case this client is used with those versions. + public static final int MAX_SIZE = 1020 * 1024; + + // Config key for what memcached servers to use. + // They should be specified in a comma sperated list with ports. + // like: + // + // host1:11211,host3:8080,host4:11211 + public static final String MEMCACHED_CONFIG_KEY = "hbase.cache.memcached.servers"; + public static final String MEMCACHED_TIMEOUT_KEY = "hbase.cache.memcached.timeout"; + public static final String MEMCACHED_OPTIMEOUT_KEY = "hbase.cache.memcached.optimeout"; + public static final long MEMCACHED_DEFAULT_TIMEOUT = 500; + + private final MemcachedClient client; + private final HFileBlockTranscoder tc = new HFileBlockTranscoder(); + private final CacheStats cacheStats = new CacheStats("MemcachedBlockCache"); + + public MemcachedBlockCache(Configuration c) throws IOException { + LOG.info("Creating MemcachedBlockCache"); + + long opTimeout = c.getLong(MEMCACHED_OPTIMEOUT_KEY, MEMCACHED_DEFAULT_TIMEOUT); + long queueTimeout = c.getLong(MEMCACHED_TIMEOUT_KEY, opTimeout + MEMCACHED_DEFAULT_TIMEOUT); + + ConnectionFactoryBuilder builder = new ConnectionFactoryBuilder() + .setOpTimeout(opTimeout) + .setOpQueueMaxBlockTime(queueTimeout) // Cap the max time before anything times out + .setFailureMode(FailureMode.Redistribute) + .setShouldOptimize(true) // When regions move lots of reads happen together + // So combining them into single requests is nice. + .setDaemon(true) // Don't keep threads around past the end of days. + .setUseNagleAlgorithm(false) // Ain't nobody got time for that + .setReadBufferSize(HConstants.DEFAULT_BLOCKSIZE * 4 * 1024); // 4 times larger than the + // default block just in case + + + // Assume only the localhost is serving memecached. + // A la mcrouter or co-locating memcached with split regionservers. 
+ // + // If this config is a pool of memecached servers they will all be used according to the + // default hashing scheme defined by the memcache client. Spy Memecache client in this + // case. + String serverListString = c.get(MEMCACHED_CONFIG_KEY,"localhost:11211"); + String[] servers = serverListString.split(","); + List serverAddresses = new ArrayList(servers.length); + for (String s:servers) { + serverAddresses.add(Addressing.createInetSocketAddressFromHostAndPortStr(s)); + } + + client = new MemcachedClient(builder.build(), serverAddresses); + } + + @Override + public void cacheBlock(BlockCacheKey cacheKey, + Cacheable buf, + boolean inMemory, + boolean cacheDataInL1) { + cacheBlock(cacheKey, buf); + } + + @Override + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { + if (buf instanceof HFileBlock) { + client.add(cacheKey.toString(), MAX_SIZE, (HFileBlock) buf, tc); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("MemcachedBlockCache can not cache Cacheable's of type " + + buf.getClass().toString()); + } + } + } + + @Override + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, + boolean repeat, boolean updateCacheMetrics) { + // Assume that nothing is the block cache + HFileBlock result = null; + + try (TraceScope traceScope = Trace.startSpan("MemcachedBlockCache.getBlock")) { + result = client.get(cacheKey.toString(), tc); + } catch (Exception e) { + // Catch a pretty broad set of exceptions to limit any changes in the memecache client + // and how it handles failures from leaking into the read path. + if (LOG.isDebugEnabled()) { + LOG.debug("Exception pulling from memcached [ " + + cacheKey.toString() + + " ]. Treating as a miss.", e); + } + result = null; + } finally { + // Update stats if this request doesn't have it turned off 100% of the time + if (updateCacheMetrics) { + if (result == null) { + cacheStats.miss(caching, cacheKey.isPrimary()); + } else { + cacheStats.hit(caching, cacheKey.isPrimary()); + } + } + } + + + return result; + } + + @Override + public boolean evictBlock(BlockCacheKey cacheKey) { + try { + cacheStats.evict(); + return client.delete(cacheKey.toString()).get(); + } catch (InterruptedException e) { + LOG.warn("Error deleting " + cacheKey.toString(), e); + Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Error deleting " + cacheKey.toString(), e); + } + } + return false; + } + + /** + * This method does nothing so that memcached can handle all evictions. + */ + @Override + public int evictBlocksByHfileName(String hfileName) { + return 0; + } + + @Override + public CacheStats getStats() { + return cacheStats; + } + + @Override + public void shutdown() { + client.shutdown(); + } + + @Override + public long size() { + return 0; + } + + @Override + public long getFreeSize() { + return 0; + } + + @Override + public long getCurrentSize() { + return 0; + } + + @Override + public long getBlockCount() { + return 0; + } + + @Override + public Iterator iterator() { + return new Iterator() { + @Override + public boolean hasNext() { + return false; + } + + @Override + public CachedBlock next() { + throw new NoSuchElementException("MemcachedBlockCache can't iterate over blocks."); + } + + @Override + public void remove() { + + } + }; + } + + @Override + public BlockCache[] getBlockCaches() { + return null; + } + + /** + * Class to encode and decode an HFileBlock to and from memecached's resulting byte arrays. 
+ */ + private static class HFileBlockTranscoder implements Transcoder { + + @Override + public boolean asyncDecode(CachedData d) { + return false; + } + + @Override + public CachedData encode(HFileBlock block) { + ByteBuffer bb = ByteBuffer.allocate(block.getSerializedLength()); + block.serialize(bb); + return new CachedData(0, bb.array(), CachedData.MAX_SIZE); + } + + @Override + public HFileBlock decode(CachedData d) { + try { + ByteBuff buf = new SingleByteBuff(ByteBuffer.wrap(d.getData())); + return (HFileBlock) HFileBlock.blockDeserializer.deserialize(buf, true, + MemoryType.EXCLUSIVE); + } catch (IOException e) { + LOG.warn("Error deserializing data from memcached",e); + } + return null; + } + + @Override + public int getMaxSize() { + return MAX_SIZE; + } + } + + @Override + public void returnBlock(BlockCacheKey cacheKey, Cacheable block) { + // Not doing reference counting. All blocks here are EXCLUSIVE + } + +} diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 412582c..2152c19 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -535,11 +535,7 @@ io.netty netty-all - - net.spy - spymemcached - true - + org.apache.htrace diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 7b4f530..d6bdec0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -139,9 +139,16 @@ public class CacheConfig { * This is used for config. */ private static enum ExternalBlockCaches { - memcached(MemcachedBlockCache.class); + memcached("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache"); // TODO(eclark): Consider more. Redis, etc. Class clazz; + ExternalBlockCaches(String clazzName) { + try { + clazz = (Class) Class.forName(clazzName); + } catch (ClassNotFoundException cnef) { + clazz = null; + } + } ExternalBlockCaches(Class clazz) { this.clazz = clazz; } @@ -572,7 +579,12 @@ public class CacheConfig { try { klass = ExternalBlockCaches.valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcache")).clazz; } catch (IllegalArgumentException exception) { - klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, MemcachedBlockCache.class); + try { + klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, Class.forName( + "org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache")); + } catch (ClassNotFoundException e) { + return null; + } } // Now try and create an instance of the block cache. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java deleted file mode 100644 index f820193..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java +++ /dev/null @@ -1,282 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hadoop.hbase.io.hfile; - -import net.spy.memcached.CachedData; -import net.spy.memcached.ConnectionFactoryBuilder; -import net.spy.memcached.FailureMode; -import net.spy.memcached.MemcachedClient; -import net.spy.memcached.transcoders.Transcoder; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType; -import org.apache.hadoop.hbase.nio.ByteBuff; -import org.apache.hadoop.hbase.nio.SingleByteBuff; -import org.apache.hadoop.hbase.util.Addressing; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; - - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.concurrent.ExecutionException; - -/** - * Class to store blocks into memcached. - * This should only be used on a cluster of Memcached daemons that are tuned well and have a - * good network connection to the HBase regionservers. Any other use will likely slow down HBase - * greatly. - */ -@InterfaceAudience.Private -public class MemcachedBlockCache implements BlockCache { - private static final Log LOG = LogFactory.getLog(MemcachedBlockCache.class.getName()); - - // Some memcache versions won't take more than 1024 * 1024. So set the limit below - // that just in case this client is used with those versions. - public static final int MAX_SIZE = 1020 * 1024; - - // Config key for what memcached servers to use. - // They should be specified in a comma sperated list with ports. - // like: - // - // host1:11211,host3:8080,host4:11211 - public static final String MEMCACHED_CONFIG_KEY = "hbase.cache.memcached.servers"; - public static final String MEMCACHED_TIMEOUT_KEY = "hbase.cache.memcached.timeout"; - public static final String MEMCACHED_OPTIMEOUT_KEY = "hbase.cache.memcached.optimeout"; - public static final long MEMCACHED_DEFAULT_TIMEOUT = 500; - - private final MemcachedClient client; - private final HFileBlockTranscoder tc = new HFileBlockTranscoder(); - private final CacheStats cacheStats = new CacheStats("MemcachedBlockCache"); - - public MemcachedBlockCache(Configuration c) throws IOException { - LOG.info("Creating MemcachedBlockCache"); - - long opTimeout = c.getLong(MEMCACHED_OPTIMEOUT_KEY, MEMCACHED_DEFAULT_TIMEOUT); - long queueTimeout = c.getLong(MEMCACHED_TIMEOUT_KEY, opTimeout + MEMCACHED_DEFAULT_TIMEOUT); - - ConnectionFactoryBuilder builder = new ConnectionFactoryBuilder() - .setOpTimeout(opTimeout) - .setOpQueueMaxBlockTime(queueTimeout) // Cap the max time before anything times out - .setFailureMode(FailureMode.Redistribute) - .setShouldOptimize(true) // When regions move lots of reads happen together - // So combining them into single requests is nice. - .setDaemon(true) // Don't keep threads around past the end of days. 
- .setUseNagleAlgorithm(false) // Ain't nobody got time for that - .setReadBufferSize(HConstants.DEFAULT_BLOCKSIZE * 4 * 1024); // 4 times larger than the - // default block just in case - - - // Assume only the localhost is serving memecached. - // A la mcrouter or co-locating memcached with split regionservers. - // - // If this config is a pool of memecached servers they will all be used according to the - // default hashing scheme defined by the memcache client. Spy Memecache client in this - // case. - String serverListString = c.get(MEMCACHED_CONFIG_KEY,"localhost:11211"); - String[] servers = serverListString.split(","); - List serverAddresses = new ArrayList(servers.length); - for (String s:servers) { - serverAddresses.add(Addressing.createInetSocketAddressFromHostAndPortStr(s)); - } - - client = new MemcachedClient(builder.build(), serverAddresses); - } - - @Override - public void cacheBlock(BlockCacheKey cacheKey, - Cacheable buf, - boolean inMemory, - boolean cacheDataInL1) { - cacheBlock(cacheKey, buf); - } - - @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { - if (buf instanceof HFileBlock) { - client.add(cacheKey.toString(), MAX_SIZE, (HFileBlock) buf, tc); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("MemcachedBlockCache can not cache Cacheable's of type " - + buf.getClass().toString()); - } - } - } - - @Override - public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, - boolean repeat, boolean updateCacheMetrics) { - // Assume that nothing is the block cache - HFileBlock result = null; - - try (TraceScope traceScope = Trace.startSpan("MemcachedBlockCache.getBlock")) { - result = client.get(cacheKey.toString(), tc); - } catch (Exception e) { - // Catch a pretty broad set of exceptions to limit any changes in the memecache client - // and how it handles failures from leaking into the read path. - if (LOG.isDebugEnabled()) { - LOG.debug("Exception pulling from memcached [ " - + cacheKey.toString() - + " ]. Treating as a miss.", e); - } - result = null; - } finally { - // Update stats if this request doesn't have it turned off 100% of the time - if (updateCacheMetrics) { - if (result == null) { - cacheStats.miss(caching, cacheKey.isPrimary()); - } else { - cacheStats.hit(caching, cacheKey.isPrimary()); - } - } - } - - - return result; - } - - @Override - public boolean evictBlock(BlockCacheKey cacheKey) { - try { - cacheStats.evict(); - return client.delete(cacheKey.toString()).get(); - } catch (InterruptedException e) { - LOG.warn("Error deleting " + cacheKey.toString(), e); - Thread.currentThread().interrupt(); - } catch (ExecutionException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Error deleting " + cacheKey.toString(), e); - } - } - return false; - } - - /** - * This method does nothing so that memcached can handle all evictions. 
- */ - @Override - public int evictBlocksByHfileName(String hfileName) { - return 0; - } - - @Override - public CacheStats getStats() { - return cacheStats; - } - - @Override - public void shutdown() { - client.shutdown(); - } - - @Override - public long size() { - return 0; - } - - @Override - public long getFreeSize() { - return 0; - } - - @Override - public long getCurrentSize() { - return 0; - } - - @Override - public long getBlockCount() { - return 0; - } - - @Override - public Iterator iterator() { - return new Iterator() { - @Override - public boolean hasNext() { - return false; - } - - @Override - public CachedBlock next() { - throw new NoSuchElementException("MemcachedBlockCache can't iterate over blocks."); - } - - @Override - public void remove() { - - } - }; - } - - @Override - public BlockCache[] getBlockCaches() { - return null; - } - - /** - * Class to encode and decode an HFileBlock to and from memecached's resulting byte arrays. - */ - private static class HFileBlockTranscoder implements Transcoder { - - @Override - public boolean asyncDecode(CachedData d) { - return false; - } - - @Override - public CachedData encode(HFileBlock block) { - ByteBuffer bb = ByteBuffer.allocate(block.getSerializedLength()); - block.serialize(bb); - return new CachedData(0, bb.array(), CachedData.MAX_SIZE); - } - - @Override - public HFileBlock decode(CachedData d) { - try { - ByteBuff buf = new SingleByteBuff(ByteBuffer.wrap(d.getData())); - return (HFileBlock) HFileBlock.blockDeserializer.deserialize(buf, true, - MemoryType.EXCLUSIVE); - } catch (IOException e) { - LOG.warn("Error deserializing data from memcached",e); - } - return null; - } - - @Override - public int getMaxSize() { - return MAX_SIZE; - } - } - - @Override - public void returnBlock(BlockCacheKey cacheKey, Cacheable block) { - // Not doing reference counting. All blocks here are EXCLUSIVE - } - -} diff --git a/pom.xml b/pom.xml index de1f5e7..46cd59e 100644 --- a/pom.xml +++ b/pom.xml @@ -67,6 +67,7 @@ hbase-annotations hbase-rest hbase-checkstyle + hbase-external-blockcache hbase-shaded hbase-spark @@ -1435,6 +1436,11 @@ ${project.version} + org.apache.hbase + hbase-external-blockcache + ${project.version} + + hbase-it org.apache.hbase ${project.version} -- 2.5.0
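A note on the CacheConfig change above: ExternalBlockCaches previously referenced MemcachedBlockCache.class directly, which required the class on hbase-server's compile-time classpath and could surface as a NoClassDefFoundError at runtime once the implementation moved to an optional module. Resolving the name with Class.forName defers that to a catchable ClassNotFoundException, so a missing hbase-external-blockcache jar simply means "no external cache". A minimal sketch of the pattern, assuming the external cache exposes the Configuration-arg constructor shown in the patch:

import org.apache.hadoop.conf.Configuration;

public class ExternalCacheLoader {
  // Resolve the optional implementation by name instead of a compile-time reference.
  static Object loadMemcachedBlockCache(Configuration conf) {
    try {
      Class<?> clazz = Class.forName("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache");
      // MemcachedBlockCache has a public Configuration-arg constructor (see the patch),
      // so the reflectively loaded class can be instantiated the same way CacheConfig would.
      return clazz.getConstructor(Configuration.class).newInstance(conf);
    } catch (ClassNotFoundException e) {
      // hbase-external-blockcache (or spymemcached) is not on the classpath;
      // treat it as "no external block cache available".
      return null;
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException("Could not construct MemcachedBlockCache", e);
    }
  }
}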
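The HFileBlockTranscoder in the patch implements spymemcached's Transcoder contract: encode turns a value into a CachedData payload, decode reverses it, and getMaxSize caps the payload size (kept just under memcached's common 1 MB item limit, hence MAX_SIZE = 1020 * 1024). Stripped of the HFileBlock serialization details, the contract looks like this minimal byte-array transcoder; it is a sketch for illustration, not part of the patch.

import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.Transcoder;

public class ByteArrayTranscoder implements Transcoder<byte[]> {
  // Mirror of MAX_SIZE above: stay under memcached's usual 1 MB item limit.
  private static final int MAX_SIZE = 1020 * 1024;

  @Override
  public boolean asyncDecode(CachedData d) {
    return false;               // decode synchronously, as HFileBlockTranscoder does
  }

  @Override
  public CachedData encode(byte[] value) {
    return new CachedData(0, value, MAX_SIZE);   // flags = 0, payload is the raw bytes
  }

  @Override
  public byte[] decode(CachedData d) {
    return d.getData();
  }

  @Override
  public int getMaxSize() {
    return MAX_SIZE;
  }
}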
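Because an unreachable or badly tuned memcached pool shows up as read-path latency and cache misses rather than a hard failure, it can be worth checking connectivity with the same spymemcached settings before enabling the cache. The sketch below is a standalone check, not part of the patch; host names and timeout values are placeholders, and the server string uses the same comma-separated host:port form as hbase.cache.memcached.servers.

import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;

import net.spy.memcached.ConnectionFactoryBuilder;
import net.spy.memcached.FailureMode;
import net.spy.memcached.MemcachedClient;

public class MemcachedSmokeTest {
  public static void main(String[] args) throws Exception {
    // Same comma-separated host:port form that hbase.cache.memcached.servers expects.
    String serverList = args.length > 0 ? args[0] : "localhost:11211";
    List<InetSocketAddress> addresses = new ArrayList<InetSocketAddress>();
    for (String hostAndPort : serverList.split(",")) {
      String[] parts = hostAndPort.trim().split(":");
      addresses.add(new InetSocketAddress(parts[0], Integer.parseInt(parts[1])));
    }

    // Connection settings mirroring the MemcachedBlockCache constructor; the timeout
    // values are placeholders matching the defaults in the patch.
    MemcachedClient client = new MemcachedClient(
        new ConnectionFactoryBuilder()
            .setOpTimeout(500)
            .setOpQueueMaxBlockTime(1000)
            .setFailureMode(FailureMode.Redistribute)
            .setDaemon(true)
            .setUseNagleAlgorithm(false)
            .build(),
        addresses);

    // Round trip one small value to confirm the daemons are reachable.
    client.set("hbase-memcached-smoke-test", 60, "ok").get();
    System.out.println("read back: " + client.get("hbase-memcached-smoke-test"));
    client.shutdown();
  }
}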