diff --git oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStoreStats.java oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStoreStats.java index 0cc990c..7e55a3c 100644 --- oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStoreStats.java +++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStoreStats.java @@ -35,12 +35,14 @@ public class FileStoreStats implements FileStoreStatsMBean, FileStoreMonitor { public static final String SEGMENT_REPO_SIZE = "SEGMENT_REPO_SIZE"; public static final String SEGMENT_WRITES = "SEGMENT_WRITES"; public static final String JOURNAL_WRITES = "JOURNAL_WRITES"; + public static final String CLEANUP_RECLAIMED_SIZE = "CLEANUP_RECLAIMED_SIZE"; private final StatisticsProvider statisticsProvider; private final FileStore store; private final MeterStats writeStats; private final CounterStats repoSize; private final MeterStats journalWriteStats; + private final MeterStats reclaimedSizeStats; public FileStoreStats(StatisticsProvider statisticsProvider, FileStore store, long initialSize) { this.statisticsProvider = statisticsProvider; @@ -48,6 +50,7 @@ public class FileStoreStats implements FileStoreStatsMBean, FileStoreMonitor { this.writeStats = statisticsProvider.getMeter(SEGMENT_WRITES, StatsOptions.DEFAULT); this.repoSize = statisticsProvider.getCounterStats(SEGMENT_REPO_SIZE, StatsOptions.DEFAULT); this.journalWriteStats = statisticsProvider.getMeter(JOURNAL_WRITES, StatsOptions.DEFAULT); + this.reclaimedSizeStats = statisticsProvider.getMeter(CLEANUP_RECLAIMED_SIZE, StatsOptions.DEFAULT); repoSize.inc(initialSize); } @@ -62,6 +65,7 @@ public class FileStoreStats implements FileStoreStatsMBean, FileStoreMonitor { @Override public void reclaimed(long size) { repoSize.dec(size); + reclaimedSizeStats.mark(size); } @Override @@ -110,6 +114,16 @@ public class FileStoreStats implements FileStoreStatsMBean, FileStoreMonitor { public CompositeData 
getJournalWriteStatsAsCompositeData() { return asCompositeData(getTimeSeries(JOURNAL_WRITES), JOURNAL_WRITES); } + + @Override + public long getReclaimedSizeStatsAsCount() { + return reclaimedSizeStats.getCount(); + } + + @Override + public CompositeData getReclaimedSizeStatsAsCompositeData() { + return asCompositeData(getTimeSeries(CLEANUP_RECLAIMED_SIZE), CLEANUP_RECLAIMED_SIZE); + } private TimeSeries getTimeSeries(String name) { return statisticsProvider.getStats().getTimeSeries(name, true); diff --git oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStoreStatsMBean.java oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStoreStatsMBean.java index ba13c9a..5c68d10 100644 --- oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStoreStatsMBean.java +++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStoreStatsMBean.java @@ -50,4 +50,14 @@ public interface FileStoreStatsMBean { * @return time series of the writes to journal */ CompositeData getJournalWriteStatsAsCompositeData(); + + /** + * @return count of the bytes reclaimed so far during cleanup + */ + long getReclaimedSizeStatsAsCount(); + + /** + * @return time series of the bytes reclaimed during cleanup + */ + CompositeData getReclaimedSizeStatsAsCompositeData(); } diff --git oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java index 42ade59..933a637 100644 --- oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java +++ oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java @@ -46,6 +46,7 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import
java.util.concurrent.FutureTask; import java.util.concurrent.ScheduledExecutorService; @@ -59,6 +60,7 @@ import org.apache.jackrabbit.oak.api.PropertyState; import org.apache.jackrabbit.oak.api.Type; import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions; import org.apache.jackrabbit.oak.segment.file.FileStore; +import org.apache.jackrabbit.oak.segment.file.FileStoreStats; import org.apache.jackrabbit.oak.spi.commit.CommitInfo; import org.apache.jackrabbit.oak.spi.commit.EmptyHook; import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry; @@ -66,6 +68,7 @@ import org.apache.jackrabbit.oak.spi.state.NodeBuilder; import org.apache.jackrabbit.oak.spi.state.NodeState; import org.apache.jackrabbit.oak.spi.state.NodeStore; import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider; +import org.apache.jackrabbit.oak.stats.StatisticsProvider; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -924,6 +927,52 @@ public class CompactionAndCleanupIT { fileStore.close(); } } + + /** + * Test asserting OAK-4106 : Concurrent writes during cleanup should not + * impact reclaimedSize computation. 
+ */ + @Test + public void concurrentWritesDuringCleanup() throws Exception { + ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + StatisticsProvider statsProvider = new DefaultStatisticsProvider(executor); + + final FileStore fileStore = fileStoreBuilder(getFileStoreFolder()) + .withGCOptions(defaultGCOptions().setRetainedGenerations(2)).withStatisticsProvider(statsProvider) + .withMaxFileSize(1).build(); + FileStoreStats stats = new FileStoreStats(statsProvider, fileStore, 0); + final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build(); + + try { + Runnable concurrentWriteTask = new Runnable() { + public void run() { + try { + NodeBuilder builder = nodeStore.getRoot().builder(); + builder.setProperty("blob" + new Random().nextInt(), createBlob(nodeStore, 512 * 512)); + + nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + fileStore.flush(); + } catch (CommitFailedException e) { + // ignore + } catch (IOException e) { + // ignore + } + }; + }; + + ExecutorService executorService = Executors.newFixedThreadPool(5); + for (int i = 0; i < 5; i++) { + executorService.execute(concurrentWriteTask); + } + + fileStore.cleanup(); + + assertTrue(stats.getReclaimedSizeStatsAsCount() >= 0); + + } finally { + fileStore.close(); + } + } private static void addContent(NodeBuilder builder) { for (int k = 0; k < 10000; k++) {