diff --git oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java
index 2a352fd..4256251 100644
--- oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java
+++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java
@@ -69,10 +69,6 @@ import javax.annotation.CheckForNull;
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 
-import com.google.common.base.Function;
-import com.google.common.base.Predicate;
-import com.google.common.base.Stopwatch;
-import com.google.common.base.Supplier;
 import org.apache.jackrabbit.oak.api.jmx.CacheStatsMBean;
 import org.apache.jackrabbit.oak.plugins.blob.ReferenceCollector;
 import org.apache.jackrabbit.oak.segment.BinaryReferenceConsumer;
@@ -100,6 +96,12 @@ import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Function;
+import com.google.common.base.Predicate;
+import com.google.common.base.Stopwatch;
+import com.google.common.base.Supplier;
+import com.google.common.collect.ImmutableList;
+
 /**
  * The storage implementation for tar files.
  */
@@ -198,11 +200,6 @@ public class FileStore implements SegmentStore, Closeable {
     private final GCListener gcListener;
 
     /**
-     * Represents the approximate size on disk of the repository.
-     */
-    private final AtomicLong approximateSize;
-
-    /**
      * This flag is periodically updated by calling the {@code SegmentGCOptions}
      * at regular intervals.
      */
@@ -290,7 +287,6 @@ public class FileStore implements SegmentStore, Closeable {
         }
 
         long initialSize = size();
-        this.approximateSize = new AtomicLong(initialSize);
         this.stats = new FileStoreStats(builder.getStatsProvider(), this, initialSize);
 
         if (!readOnly) {
@@ -577,17 +573,24 @@ public class FileStore implements SegmentStore, Closeable {
         return dataFiles;
     }
 
-    public final long size() {
+    final long size() {
+        List<TarReader> readersSnapshot = null;
+        long writeFileSnapshotSize = 0;
+
         fileStoreLock.readLock().lock();
         try {
-            long size = writeFile != null ? writeFile.length() : 0;
-            for (TarReader reader : readers) {
-                size += reader.size();
-            }
-            return size;
+            readersSnapshot = ImmutableList.copyOf(readers);
+            writeFileSnapshotSize = writeFile != null ? writeFile.length() : 0;
         } finally {
             fileStoreLock.readLock().unlock();
         }
+
+        long size = writeFileSnapshotSize;
+        for (TarReader reader : readersSnapshot) {
+            size += reader.size();
+        }
+
+        return size;
     }
 
     public int readerCount(){
@@ -790,7 +793,6 @@ public class FileStore implements SegmentStore, Closeable {
         }
 
         long finalSize = size();
-        approximateSize.set(finalSize);
         stats.reclaimed(initialSize - finalSize);
         // FIXME OAK-4106: Reclaimed size reported by FileStore.cleanup is off
         gcListener.cleaned(initialSize - finalSize, finalSize);
@@ -1285,7 +1287,6 @@ public class FileStore implements SegmentStore, Closeable {
             if (size >= maxFileSize) {
                 newWriter();
             }
-            approximateSize.addAndGet(TarWriter.BLOCK_SIZE + length + TarWriter.getPaddingSize(length));
         } finally {
             fileStoreLock.writeLock().unlock();
         }
@@ -1386,7 +1387,7 @@ public class FileStore implements SegmentStore, Closeable {
     }
 
     private void checkDiskSpace() {
-        long repositoryDiskSpace = approximateSize.get();
+        long repositoryDiskSpace = size();
         long availableDiskSpace = directory.getFreeSpace();
         boolean updated = gcOptions.isDiskSpaceSufficient(repositoryDiskSpace, availableDiskSpace);
         boolean previous = sufficientDiskSpace.getAndSet(updated);
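For readers of the FileStore change above: the old `size()` held the read lock for the whole summation and was shadowed by the now-removed `approximateSize` counter; the new version, now package-private, only copies state under the lock and sums afterwards. Below is a minimal, self-contained sketch of that copy-under-lock pattern — all class and interface names here are invented for illustration; the real method snapshots FileStore's `readers` list and `writeFile` under `fileStoreLock`:

```java
import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import com.google.common.collect.ImmutableList;

// Sketch of the copy-under-lock pattern used by the new size(): take an
// immutable snapshot of the mutable state while holding the read lock,
// then do the potentially slow summing with no lock held at all.
final class SizeUnderLockSketch {

    /** Stand-in for TarReader.size() / writeFile.length(). */
    interface SizedFile {
        long size();
    }

    private final ReadWriteLock lock = new ReentrantReadWriteLock();
    private List<SizedFile> readers = ImmutableList.of(); // swapped under the write lock
    private SizedFile writeFile;                          // may be null before the first write

    long size() {
        List<SizedFile> readersSnapshot;
        long writeFileSnapshotSize;

        lock.readLock().lock();
        try {
            readersSnapshot = ImmutableList.copyOf(readers);
            writeFileSnapshotSize = writeFile != null ? writeFile.size() : 0;
        } finally {
            lock.readLock().unlock();
        }

        long size = writeFileSnapshotSize;
        for (SizedFile reader : readersSnapshot) {
            size += reader.size();
        }
        return size;
    }
}
```

The trade-off is that the returned value can be slightly stale by the time the caller sees it, which the patch evidently accepts for `checkDiskSpace()` — the remaining in-bundle caller, which now derives the repository size from `size()` instead of the removed counter and only feeds a sufficient-disk-space heuristic.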
diff --git oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java
index b002b9f..42ade59 100644
--- oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java
+++ oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java
@@ -46,7 +46,9 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
 import java.util.concurrent.FutureTask;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -63,6 +65,7 @@ import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
@@ -88,8 +91,10 @@ public class CompactionAndCleanupIT {
 
     @Test
     public void compactionNoBinaryClone() throws IOException, CommitFailedException {
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
                 .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                 .withMaxFileSize(1)
                 .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
@@ -110,7 +115,7 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size1 = fileStore.size();
+            long size1 = fileStore.getStats().getApproximateSize();
             log.debug("File store size {}", byteCountToDisplaySize(size1));
 
             // Create a property with 5 MB blob
@@ -119,7 +124,7 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size2 = fileStore.size();
+            long size2 = fileStore.getStats().getApproximateSize();
             assertSize("1st blob added", size2, size1 + blobSize, size1 + blobSize + (blobSize / 100));
 
             // Now remove the property. No gc yet -> size doesn't shrink
@@ -128,14 +133,14 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size3 = fileStore.size();
+            long size3 = fileStore.getStats().getApproximateSize();
             assertSize("1st blob removed", size3, size2, size2 + 4096);
 
             // 1st gc cycle -> no reclaimable garbage...
             fileStore.compact();
             fileStore.cleanup();
 
-            long size4 = fileStore.size();
+            long size4 = fileStore.getStats().getApproximateSize();
             assertSize("1st gc", size4, size3, size3 + size1);
 
             // Add another 5MB binary doubling the blob size
@@ -144,21 +149,21 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size5 = fileStore.size();
+            long size5 = fileStore.getStats().getApproximateSize();
             assertSize("2nd blob added", size5, size4 + blobSize, size4 + blobSize + (blobSize / 100));
 
             // 2st gc cycle -> 1st blob should get collected
             fileStore.compact();
             fileStore.cleanup();
 
-            long size6 = fileStore.size();
+            long size6 = fileStore.getStats().getApproximateSize();
             assertSize("2nd gc", size6, size5 - blobSize - size1, size5 - blobSize);
 
             // 3rtd gc cycle -> no significant change
             fileStore.compact();
             fileStore.cleanup();
 
-            long size7 = fileStore.size();
+            long size7 = fileStore.getStats().getApproximateSize();
             assertSize("3rd gc", size7, size6 * 10/11 , size6 * 10/9);
 
             // No data loss
@@ -174,9 +179,11 @@ public class CompactionAndCleanupIT {
 
     public void offlineCompaction() throws IOException, CommitFailedException {
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
                 .withMaxFileSize(1)
                 .withGCOptions(gcOptions)
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                 .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
 
@@ -196,7 +203,7 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size1 = fileStore.size();
+            long size1 = fileStore.getStats().getApproximateSize();
             log.debug("File store size {}", byteCountToDisplaySize(size1));
 
             // Create a property with 5 MB blob
@@ -205,7 +212,7 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size2 = fileStore.size();
+            long size2 = fileStore.getStats().getApproximateSize();
             assertSize("1st blob added", size2, size1 + blobSize, size1 + blobSize + (blobSize / 100));
 
             // Now remove the property. No gc yet -> size doesn't shrink
@@ -214,14 +221,14 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size3 = fileStore.size();
+            long size3 = fileStore.getStats().getApproximateSize();
             assertSize("1st blob removed", size3, size2, size2 + 4096);
 
             // 1st gc cycle -> 1st blob should get collected
             fileStore.compact();
             fileStore.cleanup();
 
-            long size4 = fileStore.size();
+            long size4 = fileStore.getStats().getApproximateSize();
             assertSize("1st gc", size4, size3 - blobSize - size1, size3 - blobSize);
 
@@ -231,21 +238,21 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size5 = fileStore.size();
+            long size5 = fileStore.getStats().getApproximateSize();
             assertSize("2nd blob added", size5, size4 + blobSize, size4 + blobSize + (blobSize / 100));
 
             // 2st gc cycle -> 2nd blob should *not* be collected
             fileStore.compact();
             fileStore.cleanup();
 
-            long size6 = fileStore.size();
+            long size6 = fileStore.getStats().getApproximateSize();
             assertSize("2nd gc", size6, size5 * 10/11, size5 * 10/9);
 
             // 3rd gc cycle -> no significant change
             fileStore.compact();
             fileStore.cleanup();
 
-            long size7 = fileStore.size();
+            long size7 = fileStore.getStats().getApproximateSize();
             assertSize("3rd gc", size7, size6 * 10/11 , size6 * 10/9);
 
             // No data loss
@@ -265,9 +272,11 @@ public class CompactionAndCleanupIT {
     public void offlineCompactionCps() throws IOException, CommitFailedException {
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
                 .withMaxFileSize(1)
                 .withGCOptions(gcOptions)
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                 .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
         try {
@@ -285,7 +294,7 @@ public class CompactionAndCleanupIT {
             fileStore.compact();
             fileStore.cleanup();
             // Compacts to 548Kb
-            long size0 = fileStore.size();
+            long size0 = fileStore.getStats().getApproximateSize();
 
             int cpNo = 4;
             Set<String> cps = new HashSet<String>();
@@ -297,11 +306,11 @@ public class CompactionAndCleanupIT {
                 assertTrue(nodeStore.retrieve(cp) != null);
             }
 
-            long size1 = fileStore.size();
+            long size1 = fileStore.getStats().getApproximateSize();
            assertSize("with checkpoints added", size1, size0, size0 * 11 / 10);
 
             fileStore.compact();
             fileStore.cleanup();
-            long size2 = fileStore.size();
+            long size2 = fileStore.getStats().getApproximateSize();
             assertSize("with checkpoints compacted", size2, size1 * 9/10, size1 * 11 / 10);
         } finally {
             fileStore.close();
@@ -317,8 +326,12 @@ public class CompactionAndCleanupIT {
             CommitFailedException {
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline()
                 .withBinaryDeduplication();
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
-                .withMaxFileSize(1).withGCOptions(gcOptions).build();
+                .withMaxFileSize(1)
+                .withGCOptions(gcOptions)
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
+                .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders
                 .builder(fileStore).build();
 
@@ -349,10 +362,10 @@ public class CompactionAndCleanupIT {
                 assertTrue(nodeStore.retrieve(cp) != null);
             }
 
-            long size1 = fileStore.size();
+            long size1 = fileStore.getStats().getApproximateSize();
             fileStore.compact();
             fileStore.cleanup();
-            long size2 = fileStore.size();
+            long size2 = fileStore.getStats().getApproximateSize();
             assertSize("with compacted binaries", size2, 0, size1 - blobSize);
         } finally {
             fileStore.close();
@@ -372,8 +385,12 @@ public class CompactionAndCleanupIT {
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline()
                 .withBinaryDeduplication()
                 .setBinaryDeduplicationMaxSize(blobSize / 2);
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
-                .withMaxFileSize(1).withGCOptions(gcOptions).build();
+                .withMaxFileSize(1)
+                .withGCOptions(gcOptions)
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
+                .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders
                 .builder(fileStore).build();
 
@@ -403,10 +420,10 @@ public class CompactionAndCleanupIT {
                 assertTrue(nodeStore.retrieve(cp) != null);
             }
 
-            long size1 = fileStore.size();
+            long size1 = fileStore.getStats().getApproximateSize();
             fileStore.compact();
             fileStore.cleanup();
-            long size2 = fileStore.size();
+            long size2 = fileStore.getStats().getApproximateSize();
 
             // not expected to reduce the size too much, as the binaries are
             // above the threshold
@@ -425,8 +442,12 @@ public class CompactionAndCleanupIT {
     public void offlineCompactionBinR1() throws IOException,
             CommitFailedException {
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
-                .withMaxFileSize(1).withGCOptions(gcOptions).build();
+                .withMaxFileSize(1)
+                .withGCOptions(gcOptions)
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
+                .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders
                 .builder(fileStore).build();
 
@@ -457,10 +478,10 @@ public class CompactionAndCleanupIT {
             }
 
             // 5Mb, de-duplication by the SegmentWriter
-            long size1 = fileStore.size();
+            long size1 = fileStore.getStats().getApproximateSize();
             fileStore.compact();
             fileStore.cleanup();
-            long size2 = fileStore.size();
+            long size2 = fileStore.getStats().getApproximateSize();
             assertSize("with compacted binaries", size2, 0, size1 * 11 / 10);
         } finally {
             fileStore.close();
@@ -723,8 +744,8 @@ public class CompactionAndCleanupIT {
         try {
             // The 1M blob should get gc-ed
            fileStore.cleanup();
-            assertTrue(ref + " repository size " + fileStore.size() + " < " + 1024 * 1024,
-                    fileStore.size() < 1024 * 1024);
+            assertTrue(ref + " repository size " + fileStore.getStats().getApproximateSize() + " < " + 1024 * 1024,
+                    fileStore.getStats().getApproximateSize() < 1024 * 1024);
         } finally {
             fileStore.close();
         }
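Every test fixture in this file (and the ones below) gains the same two lines: a single-threaded scheduled executor and a `DefaultStatisticsProvider` wired into the builder. That wiring is what makes `fileStore.getStats().getApproximateSize()` return usable numbers now that `FileStore.size()` is package-private. A sketch of the recurring setup, pulled out into one place — the helper class and method names are hypothetical, not part of the patch, and the static import assumes `FileStoreBuilder` lives next to `FileStore` in `org.apache.jackrabbit.oak.segment.file`, as the tests' `fileStoreBuilder(...)` calls suggest:

```java
import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;

import java.io.File;
import java.io.IOException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider;

// Hypothetical helper mirroring the setup repeated in each test above.
final class StatsEnabledFileStore {

    static FileStore open(File directory) throws IOException {
        // DefaultStatisticsProvider takes a scheduler for its stats updates;
        // the tests create a fresh single-threaded one per store.
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        return fileStoreBuilder(directory)
                .withMaxFileSize(1) // small tar files, as in most of these tests
                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                .build();
    }
}
```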
diff --git oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java
index 9ab492b..cd625c3 100644
--- oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java
+++ oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java
@@ -31,6 +31,8 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 
 import javax.annotation.Nonnull;
 
@@ -52,6 +54,7 @@ import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider;
 import org.junit.After;
 import org.junit.Ignore;
 import org.junit.Rule;
@@ -159,8 +162,12 @@ public class ExternalBlobIT {
     protected SegmentNodeStore getNodeStore(BlobStore blobStore) throws IOException {
         if (nodeStore == null) {
+            ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
+
             store = fileStoreBuilder(getWorkDir()).withBlobStore(blobStore)
-                    .withMaxFileSize(1).build();
+                    .withMaxFileSize(1)
+                    .withStatisticsProvider(new DefaultStatisticsProvider(executor))
+                    .build();
             nodeStore = SegmentNodeStoreBuilders.builder(store).build();
         }
         return nodeStore;
     }
@@ -271,13 +278,13 @@ public class ExternalBlobIT {
         store.flush();
 
         // blob went to the external store
-        assertTrue(store.size() < 10 * 1024);
+        assertTrue(store.getStats().getApproximateSize() < 10 * 1024);
         close();
 
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
         store = fileStoreBuilder(getWorkDir()).withMaxFileSize(1)
                 .withGCOptions(gcOptions).build();
-        assertTrue(store.size() < 10 * 1024);
+        assertTrue(store.getStats().getApproximateSize() < 10 * 1024);
 
         store.compact();
         store.cleanup();
diff --git oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCompactionIT.java oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCompactionIT.java
index f3a5c09..5d0dcfa 100644
--- oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCompactionIT.java
+++ oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCompactionIT.java
@@ -52,6 +52,8 @@ import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CancellationException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -92,6 +94,7 @@ import org.apache.jackrabbit.oak.spi.state.NodeStore;
 import org.apache.jackrabbit.oak.spi.whiteboard.CompositeRegistration;
 import org.apache.jackrabbit.oak.spi.whiteboard.Registration;
 import org.apache.jackrabbit.oak.stats.Clock;
+import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -229,10 +232,12 @@ public class SegmentCompactionIT {
         }, 1, 1, SECONDS);
 
         SegmentGCOptions gcOptions = defaultGCOptions().setLockWaitTime(lockWaitTime);
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         fileStore = fileStoreBuilder(folder.getRoot())
                 .withMemoryMapping(true)
                 .withGCMonitor(gcMonitor)
                 .withGCOptions(gcOptions)
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                 .build();
         nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
 
@@ -299,7 +304,7 @@ public class SegmentCompactionIT {
         scheduler.scheduleAtFixedRate(new Runnable() {
             @Override
             public void run() {
-                fileStoreSize = fileStore.size();
+                fileStoreSize = fileStore.getStats().getApproximateSize();
             }
         }, 1, 1, MINUTES);
     }
diff --git oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java
index 7f393b9..8bdbc45 100644
--- oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java
+++ oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java
@@ -42,6 +42,7 @@ import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.Executor;
 import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
@@ -75,6 +76,7 @@ import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider;
 import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -105,10 +107,12 @@ public class SegmentDataStoreBlobGCIT {
     protected SegmentNodeStore getNodeStore(BlobStore blobStore) throws IOException {
         if (nodeStore == null) {
+            ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
             FileStoreBuilder builder = fileStoreBuilder(getWorkDir())
                     .withBlobStore(blobStore)
                     .withMaxFileSize(256)
                     .withMemoryMapping(false)
+                    .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                     .withGCOptions(gcOptions);
             store = builder.build();
             nodeStore = SegmentNodeStoreBuilders.builder(store).build();
@@ -144,7 +148,7 @@ public class SegmentDataStoreBlobGCIT {
         }
         nodeStore.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY);
 
-        final long dataSize = store.size();
+        final long dataSize = store.getStats().getApproximateSize();
         log.info("File store dataSize {}", byteCountToDisplaySize(dataSize));
 
         // 2. Now remove the nodes to generate garbage
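With the provider wired in, every call site above reads the repository size through the stats facade rather than through `FileStore` itself. A short usage sketch of the new pattern, reusing the hypothetical `StatsEnabledFileStore.open(...)` helper from earlier (class name invented; `getStats()`, `getApproximateSize()`, `flush()`, and `close()` are all confirmed by the patch):

```java
import java.io.File;
import java.io.IOException;

import org.apache.jackrabbit.oak.segment.file.FileStore;

// Sketch of the new call pattern for reading the approximate on-disk size.
final class ApproximateSizeExample {

    public static void main(String[] args) throws IOException {
        FileStore store = StatsEnabledFileStore.open(new File(args[0]));
        try {
            store.flush(); // persist pending writes so the reported size reflects them
            long approximateSize = store.getStats().getApproximateSize();
            System.out.println("approximate repository size: " + approximateSize + " bytes");
        } finally {
            store.close();
        }
    }
}
```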