diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 333b7ef799..75b971379f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -384,6 +384,8 @@ public class BucketCache implements BlockCache, HeapSize {
       return new ByteBufferIOEngine(capacity);
     } else if (ioEngineName.startsWith("mmap:")) {
       return new FileMmapEngine(ioEngineName.substring(5), capacity);
+    } else if (ioEngineName.startsWith("pmem:")) {
+      return new PmemIOEngine(ioEngineName.substring(5), capacity);
     } else {
       throw new IllegalArgumentException(
-          "Don't understand io engine name for cache- prefix with file:, files:, mmap: or offheap");
+          "Don't understand io engine name for cache- prefix with file:, files:, mmap:, pmem: or offheap");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java
index 82f42cda2a..84cd6742f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java
@@ -44,9 +44,9 @@ import org.apache.hadoop.util.StringUtils;
 public class FileMmapEngine implements IOEngine {
   static final Logger LOG = LoggerFactory.getLogger(FileMmapEngine.class);
 
-  private final String path;
-  private long size;
-  private ByteBufferArray bufferArray;
+  protected final String path;
+  protected long size;
+  protected ByteBufferArray bufferArray;
   private final FileChannel fileChannel;
   private RandomAccessFile raf = null;
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PmemIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PmemIOEngine.java
new file mode 100644
index 0000000000..067269a7a1
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PmemIOEngine.java
@@ -0,0 +1,71 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile.bucket;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.io.hfile.Cacheable;
+import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
+import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * IO engine that stores data in pmem devices such as DCPMM.
+ */
+@InterfaceAudience.Private
+public class PmemIOEngine extends FileMmapEngine {
+
+  // TODO this will support only one path over Pmem. To make use of multiple Pmem devices mounted,
+  // we need to support multiple paths like files IOEngine. Support later.
+  public PmemIOEngine(String filePath, long capacity) throws IOException {
+    super(filePath, capacity);
+  }
+
+  @Override
+  public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
+      throws IOException {
+    ByteBuff dstBuffer = bufferArray.asSubByteBuff(offset, length);
+    // Here the buffer that is created directly refers to the buffer in the actual buckets.
+    // When any cell is referring to the blocks created out of these buckets then it means that
+    // those cells are referring to a shared memory area which if evicted by the BucketCache would
+    // lead to corruption of results. Hence we set the type of the buffer as SHARED_MEMORY
+    // so that the readers using this block are aware of this fact and do the necessary action
+    // to prevent eviction till the results are either consumed or copied
+    return deserializer.deserialize(dstBuffer, true, MemoryType.SHARED);
+  }
+
+  @Override
+  public boolean usesSharedMemory() {
+    return true;
+  }
+
+  @Override
+  public boolean isPersistent() {
+    // TODO for now its been compared with off heap IOEngine. Next step is to test and use/add the
+    // persistence.
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return "ioengine=" + this.getClass().getSimpleName() + ", path=" + this.path + ", size=" +
+      String.format("%,d", this.size);
+  }
+}