From 615d57a1ba8269bc2709d37379f1b15a1646c831 Mon Sep 17 00:00:00 2001
From: anastas
Date: Thu, 13 Jul 2017 16:24:25 +0300
Subject: [PATCH] HBASE-18375: correct the protection against GCing the chunks
 from pool

---
 .../apache/hadoop/hbase/regionserver/ChunkCreator.java | 18 +++++++++++++-----
 .../hadoop/hbase/regionserver/CompactionPipeline.java  |  4 ++--
 2 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
index baf9a7b..191af00 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
@@ -95,7 +95,7 @@ public class ChunkCreator {
   }
 
   /**
-   * Initializes the instance of MSLABChunkCreator
+   * Initializes the instance of ChunkCreator
    * @param chunkSize the chunkSize
    * @param offheap indicates if the chunk is to be created offheap or not
    * @param globalMemStoreSize the global memstore size
@@ -134,13 +134,17 @@ public class ChunkCreator {
           LOG.trace("The chunk pool is full. Reached maxCount= " + this.pool.getMaxCount()
               + ". Creating chunk onheap.");
         }
+      } else {
+        // a chunk allocated from the pool is initially put into the strongChunkIdMap
+        this.strongChunkIdMap.put(chunk.getId(), chunk);
       }
     }
     if (chunk == null) {
       chunk = createChunk();
+      // a chunk allocated on demand is initially put into the weakChunkIdMap
+      this.weakChunkIdMap.put(chunk.getId(), new WeakReference<>(chunk));
     }
-    // put this chunk initially into the weakChunkIdMap
-    this.weakChunkIdMap.put(chunk.getId(), new WeakReference<>(chunk));
+
     // now we need to actually do the expensive memory allocation step in case of a new chunk,
     // else only the offset is set to the beginning of the chunk to accept allocations
     chunk.init();
@@ -315,11 +319,15 @@ public class ChunkCreator {
       Iterator<Integer> iterator = chunks.iterator();
       while (iterator.hasNext()) {
         Integer chunkId = iterator.next();
-        // remove the chunks every time though they are from the pool or not
-        Chunk chunk = ChunkCreator.this.removeChunk(chunkId);
+        // translate the chunk ID to a chunk
+        Chunk chunk = ChunkCreator.this.getChunk(chunkId);
         if (chunk != null) {
           if (chunk.isFromPool() && toAdd > 0) {
             reclaimedChunks.add(chunk);
+          } else {
+            // chunks that are not going back to the pool are always removed,
+            // whether they initially came from the pool or not
+            ChunkCreator.this.removeChunk(chunkId);
           }
           toAdd--;
         }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
index 5136f24..3da8f31 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
@@ -261,6 +261,8 @@ public class CompactionPipeline {
 
   private void swapSuffix(List<? extends Segment> suffix, ImmutableSegment segment,
       boolean closeSegmentsInSuffix) {
+    pipeline.removeAll(suffix);
+    if(segment != null) pipeline.addLast(segment);
     // During index merge we won't be closing the segments undergoing the merge. Segment#close()
     // will release the MSLAB chunks to pool. But in case of index merge there wont be any data copy
     // from old MSLABs. So the new cells in new segment also refers to same chunks. In case of data
@@ -272,8 +274,6 @@ public class CompactionPipeline {
         itemInSuffix.close();
       }
     }
-    pipeline.removeAll(suffix);
-    if(segment != null) pipeline.addLast(segment);
   }
 
   // replacing one segment in the pipeline with a new one exactly at the same index
-- 
1.8.5.2 (Apple Git-48)
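
The core of the ChunkCreator change is the split bookkeeping: chunks handed out by the pool are tracked with strong references, so they cannot be garbage collected while the pool still expects to reuse them, while chunks created on demand are tracked only through WeakReference and become collectable once no memstore segment points at them. Below is a minimal, self-contained Java sketch of that scheme; it is not HBase code, and ChunkTracker, its Chunk stand-in, and register() are illustrative names rather than the real API:

import java.lang.ref.WeakReference;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

class ChunkTracker {
  // stand-in for the real MSLAB chunk
  static class Chunk {
    private final int id;
    private final boolean fromPool;
    Chunk(int id, boolean fromPool) { this.id = id; this.fromPool = fromPool; }
    int getId() { return id; }
    boolean isFromPool() { return fromPool; }
  }

  private final AtomicInteger idGenerator = new AtomicInteger();
  // pooled chunks are held strongly: they cannot be GCed while tracked here
  private final Map<Integer, Chunk> strongChunkIdMap = new ConcurrentHashMap<>();
  // on-demand chunks are held weakly: they are collectable once unreferenced elsewhere
  private final Map<Integer, WeakReference<Chunk>> weakChunkIdMap = new ConcurrentHashMap<>();

  Chunk register(boolean fromPool) {
    Chunk chunk = new Chunk(idGenerator.incrementAndGet(), fromPool);
    if (fromPool) {
      strongChunkIdMap.put(chunk.getId(), chunk);
    } else {
      weakChunkIdMap.put(chunk.getId(), new WeakReference<>(chunk));
    }
    return chunk;
  }

  // translate a chunk ID back to its chunk, whichever map tracks it
  Chunk getChunk(Integer chunkId) {
    Chunk chunk = strongChunkIdMap.get(chunkId);
    if (chunk != null) {
      return chunk;
    }
    WeakReference<Chunk> ref = weakChunkIdMap.get(chunkId);
    return ref == null ? null : ref.get();
  }

  // drop tracking for a chunk that is not going back to the pool
  void removeChunk(Integer chunkId) {
    if (strongChunkIdMap.remove(chunkId) == null) {
      weakChunkIdMap.remove(chunkId);
    }
  }

  public static void main(String[] args) {
    ChunkTracker tracker = new ChunkTracker();
    Chunk pooled = tracker.register(true);
    Chunk onDemand = tracker.register(false);
    System.out.println(tracker.getChunk(pooled.getId()) == pooled);      // true
    System.out.println(tracker.getChunk(onDemand.getId()) == onDemand);  // true
    tracker.removeChunk(onDemand.getId());
    System.out.println(tracker.getChunk(onDemand.getId()));              // null
  }
}

Holding pooled chunks strongly is what the subject line refers to: a pooled chunk reachable only through a weak reference could be GCed instead of being returned to the pool.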
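The CompactionPipeline change is purely an ordering fix inside swapSuffix(): the old suffix is unlinked from the pipeline and the new segment published before the replaced segments are closed and release their MSLAB chunks back to the pool. Here is a minimal sketch of that ordering, assuming a simplified Segment whose close() stands in for chunk release; none of these names are the real HBase types:

import java.util.LinkedList;
import java.util.List;

class PipelineSketch {
  // stand-in for an immutable memstore segment backed by MSLAB chunks
  static class Segment {
    private volatile boolean closed;
    void close() { closed = true; } // in HBase this would return the chunks to the pool
    boolean isClosed() { return closed; }
  }

  private final LinkedList<Segment> pipeline = new LinkedList<>();

  synchronized void swapSuffix(List<Segment> suffix, Segment segment,
      boolean closeSegmentsInSuffix) {
    // 1) publish the swap first, so the pipeline never exposes a closed segment
    pipeline.removeAll(suffix);
    if (segment != null) pipeline.addLast(segment);
    // 2) only afterwards release the replaced segments
    if (closeSegmentsInSuffix) {
      for (Segment itemInSuffix : suffix) {
        itemInSuffix.close();
      }
    }
  }
}

Closing first, as the code did before this patch, left a window in which the pipeline still contained segments whose chunks had already been recycled.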