Index: oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiff.java =================================================================== --- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiff.java (revision 0) +++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiff.java (working copy) @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.plugins.segment; + +import com.google.common.base.Supplier; +import org.apache.jackrabbit.oak.api.PropertyState; +import org.apache.jackrabbit.oak.spi.state.NodeState; +import org.apache.jackrabbit.oak.spi.state.NodeStateDiff; + +/** + * A {@code NodeStateDiff} that cancels itself when a condition occurs. The + * condition is represented by an externally provided instance of {@code + * Supplier}. If the {@code Supplier} returns {@code true}, the diffing process + * will be canceled at the first possible occasion. + */ +class CancelableDiff implements NodeStateDiff { + + private final NodeStateDiff delegate; + + private final Supplier<Boolean> canceled; + + public CancelableDiff(NodeStateDiff delegate, Supplier<Boolean> canceled) { + this.delegate = delegate; + this.canceled = canceled; + } + + @Override + public final boolean propertyAdded(PropertyState after) { + if (canceled.get()) { + return false; + } + + return delegate.propertyAdded(after); + } + + @Override + public final boolean propertyChanged(PropertyState before, PropertyState after) { + if (canceled.get()) { + return false; + } + + return delegate.propertyChanged(before, after); + } + + @Override + public final boolean propertyDeleted(PropertyState before) { + if (canceled.get()) { + return false; + } + + return delegate.propertyDeleted(before); + } + + @Override + public final boolean childNodeAdded(String name, NodeState after) { + if (canceled.get()) { + return false; + } + + return delegate.childNodeAdded(name, after); + } + + @Override + public final boolean childNodeChanged(String name, NodeState before, NodeState after) { + if (canceled.get()) { + return false; + } + + return delegate.childNodeChanged(name, before, after); + } + + @Override + public final boolean childNodeDeleted(String name, NodeState before) { + if (canceled.get()) { + return false; + } + + return delegate.childNodeDeleted(name, before); + } + +} Property changes on: oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiff.java ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java =================================================================== --- 
oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java (revision 1706959) +++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java (working copy) @@ -16,20 +16,10 @@ */ package org.apache.jackrabbit.oak.plugins.segment; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Maps.newHashMap; -import static org.apache.jackrabbit.oak.api.Type.BINARIES; -import static org.apache.jackrabbit.oak.api.Type.BINARY; -import static org.apache.jackrabbit.oak.commons.PathUtils.concat; - -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - import com.google.common.base.Predicate; import com.google.common.base.Predicates; +import com.google.common.base.Supplier; +import com.google.common.base.Suppliers; import com.google.common.hash.Hashing; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.api.PropertyState; @@ -44,9 +34,22 @@ import org.apache.jackrabbit.oak.spi.state.ApplyDiff; import org.apache.jackrabbit.oak.spi.state.NodeBuilder; import org.apache.jackrabbit.oak.spi.state.NodeState; +import org.apache.jackrabbit.oak.spi.state.NodeStateDiff; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static com.google.common.collect.Lists.newArrayList; +import static com.google.common.collect.Maps.newHashMap; +import static org.apache.jackrabbit.oak.api.Type.BINARIES; +import static org.apache.jackrabbit.oak.api.Type.BINARY; +import static org.apache.jackrabbit.oak.commons.PathUtils.concat; + /** * Tool for compacting segments. */ @@ -88,13 +91,30 @@ */ private final boolean cloneBinaries; + /** + * Allows the cancellation of the compaction process. If this {@code + * Supplier} returns {@code true}, this compactor will cancel compaction and + * return a partial {@code SegmentNodeState} containing the changes + * compacted before the cancellation. 
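Note: a minimal sketch of how this cancel hook could be driven from calling code. This is illustrative only and not part of the patch; the class, method, and variable names below are made up, and an existing SegmentWriter is assumed.

    import java.util.concurrent.atomic.AtomicBoolean;

    import com.google.common.base.Supplier;
    import org.apache.jackrabbit.oak.plugins.segment.Compactor;
    import org.apache.jackrabbit.oak.plugins.segment.SegmentWriter;

    class CancelableCompactionSketch {

        // Hypothetical helper: another thread flips 'stopRequested' to stop compaction early.
        static Compactor newCancelableCompactor(SegmentWriter writer, final AtomicBoolean stopRequested) {
            Supplier<Boolean> cancel = new Supplier<Boolean>() {
                @Override
                public Boolean get() {
                    // Evaluated by CancelableDiff before every delegated diff callback.
                    return stopRequested.get();
                }
            };
            // Uses the new two-argument constructor introduced by this patch.
            return new Compactor(writer, cancel);
        }

    }

Once stopRequested is set to true, the next diff callback returns false and compact() hands back the partially compacted state, as described in the javadoc above.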
+ */ + private final Supplier<Boolean> cancel; + public Compactor(SegmentWriter writer) { + this(writer, Suppliers.ofInstance(false)); + } + + public Compactor(SegmentWriter writer, Supplier<Boolean> cancel) { this.writer = writer; this.map = new InMemoryCompactionMap(writer.getTracker()); this.cloneBinaries = false; + this.cancel = cancel; } public Compactor(FileStore store, CompactionStrategy compactionStrategy) { + this(store, compactionStrategy, Suppliers.ofInstance(false)); + } + + public Compactor(FileStore store, CompactionStrategy compactionStrategy, Supplier<Boolean> cancel) { this.writer = store.createSegmentWriter(); if (compactionStrategy.getPersistCompactionMap()) { this.map = new PersistedCompactionMap(store); @@ -105,11 +125,12 @@ if (compactionStrategy.isOfflineCompaction()) { includeInMap = new OfflineCompactionPredicate(); } + this.cancel = cancel; } protected SegmentNodeBuilder process(NodeState before, NodeState after, NodeState onto) { SegmentNodeBuilder builder = new SegmentNodeBuilder(writer.writeNode(onto), writer); - after.compareAgainstBaseState(before, new CompactDiff(builder)); + after.compareAgainstBaseState(before, newCompactionDiff(builder)); return builder; } @@ -215,7 +236,7 @@ NodeBuilder child = EmptyNodeState.EMPTY_NODE.builder(); boolean success = EmptyNodeState.compareAgainstEmptyState(after, - new CompactDiff(child, path, name)); + newCompactionDiff(child, path, name)); if (success) { SegmentNodeState state = writer.writeNode(child.getNodeState()); @@ -248,7 +269,7 @@ NodeBuilder child = builder.getChildNode(name); boolean success = after.compareAgainstBaseState(before, - new CompactDiff(child, path, name)); + newCompactionDiff(child, path, name)); if (success) { RecordId compactedId = writer.writeNode(child.getNodeState()) @@ -263,6 +284,14 @@ } + private NodeStateDiff newCompactionDiff(NodeBuilder builder) { + return new CancelableDiff(new CompactDiff(builder), cancel); + } + + private NodeStateDiff newCompactionDiff(NodeBuilder child, String path, String name) { + return new CancelableDiff(new CompactDiff(child, path, name), cancel); + } + private PropertyState compact(PropertyState property) { String name = property.getName(); Type type = property.getType(); Index: oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/compaction/CompactionStrategy.java =================================================================== --- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/compaction/CompactionStrategy.java (revision 1706959) +++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/compaction/CompactionStrategy.java (working copy) @@ -18,16 +18,15 @@ */ package org.apache.jackrabbit.oak.plugins.segment.compaction; +import org.apache.jackrabbit.oak.plugins.segment.SegmentId; + +import javax.annotation.Nonnull; +import java.util.concurrent.Callable; + import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; import static java.lang.System.currentTimeMillis; -import java.util.concurrent.Callable; - -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; - public abstract class CompactionStrategy { public enum CleanupType { @@ -293,4 +292,18 @@ this.offlineCompaction = offlineCompaction; } + /** + * Check if the approximate repository size is getting too big compared with + * the available space on disk. + * + * @param repositoryDiskSpace Approximate size of the disk space occupied by + * the repository. 
+ * @param availableDiskSpace Currently available disk space. + * @return {@code true} if the available disk space is considered enough for + * normal repository operations. + */ + public boolean isDiskSpaceSufficient(long repositoryDiskSpace, long availableDiskSpace) { + return availableDiskSpace > 0.25 * repositoryDiskSpace; + } + } Index: oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java =================================================================== --- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java (revision 1706959) +++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java (working copy) @@ -16,46 +16,8 @@ */ package org.apache.jackrabbit.oak.plugins.segment.file; -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Lists.newArrayListWithCapacity; -import static com.google.common.collect.Lists.newLinkedList; -import static com.google.common.collect.Maps.newHashMap; -import static com.google.common.collect.Maps.newLinkedHashMap; -import static com.google.common.collect.Sets.newHashSet; -import static java.lang.String.format; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; -import static org.apache.jackrabbit.oak.commons.IOUtils.humanReadableByteCount; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; -import static org.apache.jackrabbit.oak.plugins.segment.CompactionMap.sum; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.NO_COMPACTION; - -import java.io.Closeable; -import java.io.File; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.ByteBuffer; -import java.nio.channels.FileLock; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import javax.annotation.Nonnull; - import com.google.common.base.Stopwatch; +import com.google.common.base.Supplier; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.plugins.blob.BlobStoreBlob; import org.apache.jackrabbit.oak.plugins.segment.CompactionMap; @@ -79,6 +41,46 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nonnull; +import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.FileLock; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static 
com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; +import static com.google.common.collect.Lists.newArrayList; +import static com.google.common.collect.Lists.newArrayListWithCapacity; +import static com.google.common.collect.Lists.newLinkedList; +import static com.google.common.collect.Maps.newHashMap; +import static com.google.common.collect.Maps.newLinkedHashMap; +import static com.google.common.collect.Sets.newHashSet; +import static java.lang.String.format; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.apache.jackrabbit.oak.commons.IOUtils.humanReadableByteCount; +import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; +import static org.apache.jackrabbit.oak.plugins.segment.CompactionMap.sum; +import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.NO_COMPACTION; + /** * The storage implementation for tar files. */ @@ -148,6 +150,14 @@ */ private final BackgroundThread compactionThread; + /** + * This background thread periodically asks the {@code CompactionStrategy} + * to compare the approximate size of the repository with the available disk + * space. The result of this comparison is stored in the state of this + * {@code FileStore}. + */ + private final BackgroundThread diskSpaceThread; + private CompactionStrategy compactionStrategy = NO_COMPACTION; /** @@ -173,6 +183,17 @@ private final GCMonitor gcMonitor; /** + * Represents the approximate size on disk of the repository. + */ + private final AtomicLong approximateSize; + + /** + * This flag is periodically updated by calling the {@code + * CompactionStrategy} at regular intervals. + */ + private final AtomicBoolean sufficientDiskSpace; + + /** * Create a new instance of a {@link Builder} for a file store. * @param directory directory where the tar files are stored * @return a new {@link Builder} instance. @@ -456,11 +477,26 @@ maybeCompact(true); } }); + + diskSpaceThread = new BackgroundThread("TarMK disk space check [" + directory + "]", TimeUnit.MINUTES.toMillis(1), new Runnable() { + + @Override + public void run() { + checkDiskSpace(); + } + + }); + + approximateSize = new AtomicLong(size()); } else { this.flushThread = null; this.compactionThread = null; + diskSpaceThread = null; + approximateSize = null; } + sufficientDiskSpace = new AtomicBoolean(true); + if (readonly) { log.info("TarMK ReadOnly opened: {} (mmap={})", directory, memoryMapping); @@ -773,6 +809,7 @@ cm.remove(cleanedIds); long finalSize = size(); + approximateSize.set(finalSize); gcMonitor.cleaned(initialSize - finalSize, finalSize); gcMonitor.info("TarMK revision cleanup completed in {}. Post cleanup size is {} " + "and space reclaimed {}. Compaction map weight/depth is {}/{}.", watch, @@ -791,6 +828,27 @@ } /** + * Returns the cancellation policy for the compaction phase. If the disk + * space was considered insufficient at least once during compaction (or if + * the space was never sufficient to begin with), compaction is considered + * canceled. + * + * @return a flag indicating if compaction should be canceled. + */ + private Supplier<Boolean> newCancelCompactionCondition() { + return new Supplier<Boolean>() { + + private boolean hit = false; + + @Override + public Boolean get() { + return (hit = hit || !sufficientDiskSpace.get()); + } + + }; + } + + /** + * Copy every referenced record in data (non-bulk) segments. 
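Worth noting: the Supplier returned by newCancelCompactionCondition() latches, so once insufficient disk space has been observed it keeps reporting true even if space is freed up later. A standalone sketch of that behavior, with illustrative names only:

    import java.util.concurrent.atomic.AtomicBoolean;

    import com.google.common.base.Supplier;

    class LatchingCancelSketch {

        // Mirrors the latching logic of newCancelCompactionCondition().
        static Supplier<Boolean> cancelOnceLow(final AtomicBoolean sufficientDiskSpace) {
            return new Supplier<Boolean>() {
                private boolean hit = false;

                @Override
                public Boolean get() {
                    // A single low-disk-space observation cancels the whole compaction run.
                    hit = hit || !sufficientDiskSpace.get();
                    return hit;
                }
            };
        }

    }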
Bulk segments + * are fully kept (they are only removed in cleanup, if there is no * reference to them). @@ -801,7 +859,8 @@ gcMonitor.info("TarMK compaction running, strategy={}", compactionStrategy); long start = System.currentTimeMillis(); - Compactor compactor = new Compactor(this, compactionStrategy); + Supplier<Boolean> compactionCanceled = newCancelCompactionCondition(); + Compactor compactor = new Compactor(this, compactionStrategy, compactionCanceled); SegmentNodeState before = getHead(); long existing = before.getChildNode(SegmentNodeStore.CHECKPOINTS) .getChildNodeCount(Long.MAX_VALUE); @@ -813,6 +872,11 @@ SegmentNodeState after = compactor.compact(EMPTY_NODE, before, EMPTY_NODE); + if (compactionCanceled.get()) { + gcMonitor.warn("TarMK compaction was canceled, not enough disk space available."); + return; + } + Callable setHead = new SetHead(before, after, compactor); try { int cycles = 0; @@ -826,6 +890,12 @@ "Compacting these commits. Cycle {}", cycles); SegmentNodeState head = getHead(); after = compactor.compact(before, head, after); + + if (compactionCanceled.get()) { + gcMonitor.warn("TarMK compaction was canceled, not enough disk space available."); + return; + } + before = head; setHead = new SetHead(head, after, compactor); } @@ -899,6 +969,7 @@ // threads before acquiring the synchronization lock closeAndLogOnFail(compactionThread); closeAndLogOnFail(flushThread); + closeAndLogOnFail(diskSpaceThread); synchronized (this) { try { flush(); @@ -1035,6 +1106,7 @@ if (size >= maxFileSize) { newWriter(); } + approximateSize.addAndGet(TarWriter.BLOCK_SIZE + length + TarWriter.getPaddingSize(length)); } catch (IOException e) { throw new RuntimeException(e); } @@ -1117,6 +1189,25 @@ persistedHead.set(id); } + private void checkDiskSpace() { + long repositoryDiskSpace = approximateSize.get(); + long availableDiskSpace = directory.getFreeSpace(); + boolean updated = compactionStrategy.isDiskSpaceSufficient(repositoryDiskSpace, availableDiskSpace); + boolean previous = sufficientDiskSpace.getAndSet(updated); + + if (previous && !updated) { + log.warn("Available disk space ({}) is too low, current repository size is approx. {}", + humanReadableByteCount(availableDiskSpace), + humanReadableByteCount(repositoryDiskSpace)); + } + + if (updated && !previous) { + log.info("Available disk space ({}) is sufficient again for repository operations, current repository size is approx. {}", + humanReadableByteCount(availableDiskSpace), + humanReadableByteCount(repositoryDiskSpace)); + } + } + /** * A read only {@link FileStore} implementation that supports * going back to old revisions. Index: oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/package-info.java =================================================================== --- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/package-info.java (revision 1706959) +++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/package-info.java (working copy) @@ -14,7 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -@Version("5.0.0") +@Version("5.1.0") @Export(optional = "provide:=true") package org.apache.jackrabbit.oak.plugins.segment; Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiffTest.java =================================================================== --- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiffTest.java (revision 0) +++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiffTest.java (working copy) @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.plugins.segment; + +import com.google.common.base.Suppliers; +import org.apache.jackrabbit.oak.api.PropertyState; +import org.apache.jackrabbit.oak.spi.state.NodeState; +import org.apache.jackrabbit.oak.spi.state.NodeStateDiff; +import org.junit.Test; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; + +public class CancelableDiffTest { + + @Test + public void testPropertyAddedInterruptible() throws Throwable { + PropertyState after = mock(PropertyState.class); + + NodeStateDiff wrapped = mock(NodeStateDiff.class); + doReturn(true).when(wrapped).propertyAdded(after); + + assertTrue(newCancelableDiff(wrapped, false).propertyAdded(after)); + assertFalse(newCancelableDiff(wrapped, true).propertyAdded(after)); + } + + @Test + public void testPropertyChangedInterruptible() throws Throwable { + PropertyState before = mock(PropertyState.class); + PropertyState after = mock(PropertyState.class); + + NodeStateDiff wrapped = mock(NodeStateDiff.class); + doReturn(true).when(wrapped).propertyChanged(before, after); + + assertTrue(newCancelableDiff(wrapped, false).propertyChanged(before, after)); + assertFalse(newCancelableDiff(wrapped, true).propertyChanged(before, after)); + } + + @Test + public void testPropertyDeletedInterruptible() throws Throwable { + PropertyState before = mock(PropertyState.class); + + NodeStateDiff wrapped = mock(NodeStateDiff.class); + doReturn(true).when(wrapped).propertyDeleted(before); + + assertTrue(newCancelableDiff(wrapped, false).propertyDeleted(before)); + assertFalse(newCancelableDiff(wrapped, true).propertyDeleted(before)); + } + + @Test + public void testChildNodeAddedInterruptible() throws Throwable { + NodeState after = mock(NodeState.class); + + NodeStateDiff wrapped = mock(NodeStateDiff.class); + doReturn(true).when(wrapped).childNodeAdded("name", after); + + assertTrue(newCancelableDiff(wrapped, false).childNodeAdded("name", after)); + assertFalse(newCancelableDiff(wrapped, true).childNodeAdded("name", after)); + } + + @Test + public void testChildNodeChangedInterruptible() throws Throwable 
{ + NodeState before = mock(NodeState.class); + NodeState after = mock(NodeState.class); + + NodeStateDiff wrapped = mock(NodeStateDiff.class); + doReturn(true).when(wrapped).childNodeChanged("name", before, after); + + assertTrue(newCancelableDiff(wrapped, false).childNodeChanged("name", before, after)); + assertFalse(newCancelableDiff(wrapped, true).childNodeChanged("name", before, after)); + } + + @Test + public void testChildNodeDeletedInterruptible() throws Throwable { + NodeState before = mock(NodeState.class); + + NodeStateDiff wrapped = mock(NodeStateDiff.class); + doReturn(true).when(wrapped).childNodeDeleted("name", before); + + assertTrue(newCancelableDiff(wrapped, false).childNodeDeleted("name", before)); + assertFalse(newCancelableDiff(wrapped, true).childNodeDeleted("name", before)); + } + + private NodeStateDiff newCancelableDiff(NodeStateDiff wrapped, boolean cancel) { + return new CancelableDiff(wrapped, Suppliers.ofInstance(cancel)); + } + +} Property changes on: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiffTest.java ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompactorTest.java =================================================================== --- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompactorTest.java (revision 1706959) +++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompactorTest.java (working copy) @@ -16,8 +16,9 @@ */ package org.apache.jackrabbit.oak.plugins.segment; +import com.google.common.base.Supplier; +import com.google.common.base.Suppliers; import junit.framework.Assert; - import org.apache.jackrabbit.oak.Oak; import org.apache.jackrabbit.oak.api.CommitFailedException; import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; @@ -27,35 +28,63 @@ import org.apache.jackrabbit.oak.spi.state.NodeBuilder; import org.apache.jackrabbit.oak.spi.state.NodeState; import org.apache.jackrabbit.oak.spi.state.NodeStore; +import org.junit.After; +import org.junit.Before; import org.junit.Test; +import static org.junit.Assert.assertFalse; + public class CompactorTest { + private SegmentStore segmentStore; + + @Before + public void openSegmentStore() { + segmentStore = new MemoryStore(); + } + + @After + public void closeSegmentStore() { + segmentStore.close(); + } + @Test public void testCompactor() throws Exception { - MemoryStore source = new MemoryStore(); - try { - NodeStore store = new SegmentNodeStore(source); - init(store); + NodeStore store = new SegmentNodeStore(segmentStore); + init(store); - Compactor compactor = new Compactor(source.getTracker().getWriter()); - addTestContent(store, 0); + Compactor compactor = new Compactor(segmentStore.getTracker().getWriter()); + addTestContent(store, 0); - NodeState initial = store.getRoot(); - SegmentNodeState after = compactor - .compact(initial, store.getRoot(), initial); - Assert.assertEquals(store.getRoot(), after); + NodeState initial = store.getRoot(); + SegmentNodeState after = compactor + .compact(initial, store.getRoot(), initial); + Assert.assertEquals(store.getRoot(), after); - addTestContent(store, 1); - after = compactor.compact(initial, store.getRoot(), initial); - Assert.assertEquals(store.getRoot(), after); + addTestContent(store, 1); + after = compactor.compact(initial, store.getRoot(), initial); + Assert.assertEquals(store.getRoot(), after); + } 
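Outside the tests, the same wrapping pattern used by the newCancelableDiff() helper above applies to any NodeStateDiff delegate. A sketch, with ApplyDiff picked only as a convenient concrete delegate; the class and method names are made up, and the code has to live in the same package because CancelableDiff is package-private:

    package org.apache.jackrabbit.oak.plugins.segment;

    import com.google.common.base.Suppliers;
    import org.apache.jackrabbit.oak.spi.state.ApplyDiff;
    import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
    import org.apache.jackrabbit.oak.spi.state.NodeState;
    import org.apache.jackrabbit.oak.spi.state.NodeStateDiff;

    class CancelableDiffSketch {

        // Applies the changes between 'before' and 'after' onto 'onto', unless canceled.
        // Returns false as soon as the cancellation supplier reports true.
        static boolean applyUnlessCanceled(NodeState before, NodeState after, NodeBuilder onto, boolean canceled) {
            NodeStateDiff diff = new CancelableDiff(new ApplyDiff(onto), Suppliers.ofInstance(canceled));
            return after.compareAgainstBaseState(before, diff);
        }

    }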
- } finally { - source.close(); - } + @Test + public void testCancel() throws Throwable { + // Create a Compactor that will cancel itself as soon as possible. The + // early cancellation is the reason why the returned SegmentNodeState + // doesn't have the child named "b". + + NodeStore store = SegmentNodeStore.newSegmentNodeStore(segmentStore).create(); + Compactor compactor = new Compactor(segmentStore.getTracker().getWriter(), Suppliers.ofInstance(true)); + SegmentNodeState sns = compactor.compact(store.getRoot(), addChild(store.getRoot(), "b"), store.getRoot()); + assertFalse(sns.hasChildNode("b")); } + private NodeState addChild(NodeState current, String name) { + NodeBuilder builder = current.builder(); + builder.child(name); + return builder.getNodeState(); + } + private static void init(NodeStore store) { new Oak(store).with(new OpenSecurityProvider()) .createContentRepository();
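For context, the default isDiskSpaceSufficient() rule added to CompactionStrategy treats the disk space as sufficient only while the free space exceeds a quarter of the approximate repository size. A tiny numeric sketch of the threshold, with made-up figures:

    class DiskSpaceRuleSketch {

        public static void main(String[] args) {
            long repositoryDiskSpace = 40L * 1024 * 1024 * 1024; // approx. repository size: 40 GB
            long availableDiskSpace = 12L * 1024 * 1024 * 1024;  // free space on the partition: 12 GB

            // Default rule: availableDiskSpace > 0.25 * repositoryDiskSpace.
            // 12 GB > 10 GB, so compaction is allowed to keep running.
            System.out.println(availableDiskSpace > 0.25 * repositoryDiskSpace); // prints true
        }

    }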