diff --git oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreBackupImpl.java oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreBackupImpl.java
index a8a61ee367..090febfc8f 100644
--- oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreBackupImpl.java
+++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreBackupImpl.java
@@ -26,7 +26,7 @@ import java.io.IOException;
 
 import com.google.common.base.Stopwatch;
 import org.apache.jackrabbit.oak.backup.FileStoreBackup;
-import org.apache.jackrabbit.oak.segment.Compactor;
+import org.apache.jackrabbit.oak.segment.ClassicCompactor;
 import org.apache.jackrabbit.oak.segment.DefaultSegmentWriter;
 import org.apache.jackrabbit.oak.segment.Revisions;
 import org.apache.jackrabbit.oak.segment.SegmentBufferWriter;
@@ -85,7 +85,7 @@ public class FileStoreBackupImpl implements FileStoreBackup {
             new WriterCacheManager.Default(),
             bufferWriter
         );
-        Compactor compactor = new Compactor(
+        ClassicCompactor compactor = new ClassicCompactor(
            backup.getReader(),
            writer,
            backup.getBlobStore(),
diff --git oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreRestoreImpl.java oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreRestoreImpl.java
index 751f337d04..261cebc050 100644
--- oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreRestoreImpl.java
+++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreRestoreImpl.java
@@ -29,7 +29,7 @@ import com.google.common.base.Stopwatch;
 import com.google.common.base.Suppliers;
 import org.apache.jackrabbit.oak.backup.FileStoreRestore;
 import org.apache.jackrabbit.oak.segment.DefaultSegmentWriter;
-import org.apache.jackrabbit.oak.segment.Compactor;
+import org.apache.jackrabbit.oak.segment.ClassicCompactor;
 import org.apache.jackrabbit.oak.segment.SegmentBufferWriter;
 import org.apache.jackrabbit.oak.segment.SegmentNodeState;
 import org.apache.jackrabbit.oak.segment.SegmentWriter;
@@ -82,7 +82,7 @@ public class FileStoreRestoreImpl implements FileStoreRestore {
             bufferWriter
         );
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
-        Compactor compactor = new Compactor(
+        ClassicCompactor compactor = new ClassicCompactor(
            store.getReader(),
            writer,
            store.getBlobStore(),
diff --git oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
index 6b9777df96..bd0aad5955 100644
--- oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
+++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
@@ -55,7 +55,7 @@ import org.jetbrains.annotations.Nullable;
  * the same checkpoint or root state occur again in a later compaction retry cycle.
  *
  */
-public class CheckpointCompactor {
+public class CheckpointCompactor implements Compactor {
 
     @NotNull
     private final GCMonitor gcListener;
@@ -63,7 +63,7 @@ public class CheckpointCompactor {
     private final Map<NodeState, NodeState> cpCache = newHashMap();
 
     @NotNull
-    private final Compactor compactor;
+    private final ClassicCompactor compactor;
 
     @NotNull
     private final NodeWriter nodeWriter;
@@ -75,6 +75,7 @@ public class CheckpointCompactor {
 
     /**
      * Create a new instance based on the passed arguments.
+     * @param gcListener listener receiving notifications about the garbage collection process
      * @param reader segment reader used to read from the segments
      * @param writer segment writer used to serialise to segments
      * @param blobStore the blob store or {@code null} if none
@@ -88,7 +89,7 @@
             @Nullable BlobStore blobStore,
             @NotNull GCNodeWriteMonitor compactionMonitor) {
         this.gcListener = gcListener;
-        this.compactor = new Compactor(reader, writer, blobStore, compactionMonitor);
+        this.compactor = new ClassicCompactor(reader, writer, blobStore, compactionMonitor);
         this.nodeWriter = (node, stableId) -> {
             RecordId nodeId = writer.writeNode(node, stableId);
             return new SegmentNodeState(reader, writer, blobStore, nodeId);
@@ -104,6 +105,7 @@
      * @return compacted clone of {@code uncompacted} or {@code null} if cancelled.
      * @throws IOException
      */
+    @Override
     @Nullable
     public SegmentNodeState compact(
         @NotNull NodeState base,
diff --git oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ClassicCompactor.java oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ClassicCompactor.java
new file mode 100644
index 0000000000..005ade9311
--- /dev/null
+++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ClassicCompactor.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.jackrabbit.oak.segment;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.collect.Lists.newArrayList;
+import static org.apache.jackrabbit.oak.api.Type.BINARIES;
+import static org.apache.jackrabbit.oak.api.Type.BINARY;
+import static org.apache.jackrabbit.oak.plugins.memory.BinaryPropertyState.binaryProperty;
+import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
+import static org.apache.jackrabbit.oak.plugins.memory.MultiBinaryPropertyState.binaryPropertyFromBlob;
+import static org.apache.jackrabbit.oak.plugins.memory.PropertyStates.createProperty;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.PropertyState;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.commons.Buffer;
+import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeBuilder;
+import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
+import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
+import org.apache.jackrabbit.oak.spi.blob.BlobStore;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateDiff;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Instances of this class can be used to compact a node state, i.e. to create a clone
+ * of a given node state without value sharing except for binaries. Binaries that are
+ * stored in a list of bulk segments will still value share the bulk segments (but not
+ * the list records).
+ * A node can either be compacted on its own or alternatively the difference between
+ * two nodes can be compacted on top of an already compacted node.
+ */
+public class ClassicCompactor implements Compactor {
+
+    /**
+     * Number of content updates that need to happen before the updates
+     * are automatically purged to the underlying segments.
+     */
+    static final int UPDATE_LIMIT =
+            Integer.getInteger("compaction.update.limit", 10000);
+
+    @NotNull
+    private final SegmentWriter writer;
+
+    @NotNull
+    private final SegmentReader reader;
+
+    @Nullable
+    private final BlobStore blobStore;
+
+    @NotNull
+    private final GCNodeWriteMonitor compactionMonitor;
+
+    /**
+     * Create a new instance based on the passed arguments.
+     * @param reader segment reader used to read from the segments
+     * @param writer segment writer used to serialise to segments
+     * @param blobStore the blob store or {@code null} if none
+     * @param compactionMonitor notification callback for each compacted node,
+     * property, and binary
+     */
+    public ClassicCompactor(
+            @NotNull SegmentReader reader,
+            @NotNull SegmentWriter writer,
+            @Nullable BlobStore blobStore,
+            @NotNull GCNodeWriteMonitor compactionMonitor) {
+        this.writer = checkNotNull(writer);
+        this.reader = checkNotNull(reader);
+        this.blobStore = blobStore;
+        this.compactionMonitor = checkNotNull(compactionMonitor);
+    }
+
+    /**
+     * Compact a given {@code state}.
+     * @param state the node state to compact
+     * @return the compacted node state or {@code null} if cancelled.
+     * @throws IOException
+     */
+    @Nullable
+    public SegmentNodeState compact(@NotNull NodeState state, Canceller canceller) throws IOException {
+        return compact(EMPTY_NODE, state, EMPTY_NODE, canceller);
+    }
+
+    /**
+     * Compact the differences between {@code after} and {@code before} on top of {@code onto}.
+     * @param before the node state to diff {@code after} against
+     * @param after the node state diffed against {@code before}
+     * @param onto the node state compacted onto
+     * @return the compacted node state or {@code null} if cancelled.
+     * @throws IOException
+     */
+    @Nullable
+    public SegmentNodeState compact(
+            @NotNull NodeState before,
+            @NotNull NodeState after,
+            @NotNull NodeState onto,
+            Canceller canceller
+    ) throws IOException {
+        checkNotNull(before);
+        checkNotNull(after);
+        checkNotNull(onto);
+        return new CompactDiff(onto, canceller).diff(before, after);
+    }
+
+    @Nullable
+    private static Buffer getStableIdBytes(NodeState state) {
+        if (state instanceof SegmentNodeState) {
+            return ((SegmentNodeState) state).getStableIdBytes();
+        } else {
+            return null;
+        }
+    }
+
+    private class CompactDiff implements NodeStateDiff {
+
+        @NotNull
+        private MemoryNodeBuilder builder;
+
+        @NotNull
+        private final NodeState base;
+
+        private final Canceller canceller;
+
+        @Nullable
+        private IOException exception;
+
+        private long modCount;
+
+        private void updated() throws IOException {
+            if (++modCount % UPDATE_LIMIT == 0) {
+                RecordId newBaseId = writer.writeNode(builder.getNodeState(), null);
+                SegmentNodeState newBase = new SegmentNodeState(reader, writer, blobStore, newBaseId);
+                builder = new MemoryNodeBuilder(newBase);
+            }
+        }
+
+        CompactDiff(@NotNull NodeState base, Canceller canceller) {
+            this.builder = new MemoryNodeBuilder(checkNotNull(base));
+            this.canceller = canceller;
+            this.base = base;
+        }
+
+        @Nullable
+        SegmentNodeState diff(@NotNull NodeState before, @NotNull NodeState after) throws IOException {
+            boolean success = after.compareAgainstBaseState(before, new CancelableDiff(this, () -> canceller.check().isCancelled()));
+            if (exception != null) {
+                throw new IOException(exception);
+            } else if (success) {
+                NodeState nodeState = builder.getNodeState();
+                checkState(modCount == 0 || !(nodeState instanceof SegmentNodeState));
+                RecordId nodeId = writer.writeNode(nodeState, getStableIdBytes(after));
+                compactionMonitor.onNode();
+                return new SegmentNodeState(reader, writer, blobStore, nodeId);
+            } else {
+                return null;
+            }
+        }
+
+        @Override
+        public boolean propertyAdded(@NotNull PropertyState after) {
+            builder.setProperty(compact(after));
+            return true;
+        }
+
+        @Override
+        public boolean propertyChanged(@NotNull PropertyState before, @NotNull PropertyState after) {
+            builder.setProperty(compact(after));
+            return true;
+        }
+
+        @Override
+        public boolean propertyDeleted(PropertyState before) {
+            builder.removeProperty(before.getName());
+            return true;
+        }
+
+        @Override
+        public boolean childNodeAdded(@NotNull String name, @NotNull NodeState after) {
+            try {
+                SegmentNodeState compacted = compact(after, canceller);
+                if (compacted != null) {
+                    updated();
+                    builder.setChildNode(name, compacted);
+                    return true;
+                } else {
+                    return false;
+                }
+            } catch (IOException e) {
+                exception = e;
+                return false;
+            }
+        }
+
+        @Override
+        public boolean childNodeChanged(@NotNull String name, @NotNull NodeState before, @NotNull NodeState after) {
+            try {
+                SegmentNodeState compacted = compact(before, after, base.getChildNode(name), canceller);
+                if (compacted != null) {
+                    updated();
+                    builder.setChildNode(name, compacted);
+                    return true;
+                } else {
+                    return false;
+                }
+            } catch (IOException e) {
+                exception = e;
+                return false;
+            }
+        }
+
+        @Override
+        public boolean childNodeDeleted(String name, NodeState before) {
+            try {
+                updated();
+                builder.getChildNode(name).remove();
+                return true;
+            } catch (IOException e) {
+                exception = e;
+                return false;
+            }
+        }
+    }
+
+    @NotNull
+    private PropertyState compact(@NotNull PropertyState property) {
+        compactionMonitor.onProperty();
+        String name = property.getName();
+        Type<?> type = property.getType();
+        if (type == BINARY) {
+            compactionMonitor.onBinary();
+            return binaryProperty(name, property.getValue(Type.BINARY));
+        } else if (type == BINARIES) {
+            List<Blob> blobs = newArrayList();
+            for (Blob blob : property.getValue(BINARIES)) {
+                compactionMonitor.onBinary();
+                blobs.add(blob);
+            }
+            return binaryPropertyFromBlob(name, blobs);
+        } else {
+            return createProperty(name, property.getValue(type), type);
+        }
+    }
+
+}
diff --git oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Compactor.java oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Compactor.java
index 746aa5040d..ebc52ffdd2 100644
--- oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Compactor.java
+++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Compactor.java
@@ -1,264 +1,31 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
 *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ *   http://www.apache.org/licenses/LICENSE-2.0
 *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
 */
-package org.apache.jackrabbit.oak.segment;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-import static com.google.common.base.Preconditions.checkState;
-import static com.google.common.collect.Lists.newArrayList;
-import static org.apache.jackrabbit.oak.api.Type.BINARIES;
-import static org.apache.jackrabbit.oak.api.Type.BINARY;
-import static org.apache.jackrabbit.oak.plugins.memory.BinaryPropertyState.binaryProperty;
-import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
-import static org.apache.jackrabbit.oak.plugins.memory.MultiBinaryPropertyState.binaryPropertyFromBlob;
-import static org.apache.jackrabbit.oak.plugins.memory.PropertyStates.createProperty;
-
-import java.io.IOException;
-import java.util.List;
+package org.apache.jackrabbit.oak.segment;
 
-import org.apache.jackrabbit.oak.api.Blob;
-import org.apache.jackrabbit.oak.api.PropertyState;
-import org.apache.jackrabbit.oak.api.Type;
-import org.apache.jackrabbit.oak.commons.Buffer;
-import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeBuilder;
-import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
-import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
-import org.apache.jackrabbit.oak.spi.state.NodeStateDiff;
 import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Instances of this class can be used to compact a node state. I.e. to create a clone
- * of a given node state without value sharing except for binaries. Binaries that are
- * stored in a list of bulk segments will still value share the bulk segments (but not
- * the list records).
- * A node can either be compacted on its own or alternatively the difference between
- * two nodes can be compacted on top of an already compacted node.
- */
-public class Compactor {
-
-    /**
-     * Number of content updates that need to happen before the updates
-     * are automatically purged to the underlying segments.
-     */
-    static final int UPDATE_LIMIT =
-            Integer.getInteger("compaction.update.limit", 10000);
-
-    @NotNull
-    private final SegmentWriter writer;
-
-    @NotNull
-    private final SegmentReader reader;
-
-    @Nullable
-    private final BlobStore blobStore;
-
-    @NotNull
-    private final GCNodeWriteMonitor compactionMonitor;
-
-    /**
-     * Create a new instance based on the passed arguments.
-     * @param reader segment reader used to read from the segments
-     * @param writer segment writer used to serialise to segments
-     * @param blobStore the blob store or {@code null} if none
-     * @param compactionMonitor notification call back for each compacted nodes,
-     * properties, and binaries
-     */
-    public Compactor(
-            @NotNull SegmentReader reader,
-            @NotNull SegmentWriter writer,
-            @Nullable BlobStore blobStore,
-            @NotNull GCNodeWriteMonitor compactionMonitor) {
-        this.writer = checkNotNull(writer);
-        this.reader = checkNotNull(reader);
-        this.blobStore = blobStore;
-        this.compactionMonitor = checkNotNull(compactionMonitor);
-    }
-
-    /**
-     * Compact a given {@code state}
-     * @param state the node state to compact
-     * @return the compacted node state or {@code null} if cancelled.
-     * @throws IOException
-     */
-    @Nullable
-    public SegmentNodeState compact(@NotNull NodeState state, Canceller canceller) throws IOException {
-        return compact(EMPTY_NODE, state, EMPTY_NODE, canceller);
-    }
-
-    /**
-     * compact the differences between {@code after} and {@code before} on top of {@code ont}.
-     * @param before the node state to diff against from {@code after}
-     * @param after the node state diffed against {@code before}
-     * @param onto the node state compacted onto
-     * @return the compacted node state or {@code null} if cancelled.
-     * @throws IOException
-     */
-    @Nullable
-    public SegmentNodeState compact(
-            @NotNull NodeState before,
-            @NotNull NodeState after,
-            @NotNull NodeState onto,
-            Canceller canceller
-    ) throws IOException {
-        checkNotNull(before);
-        checkNotNull(after);
-        checkNotNull(onto);
-        return new CompactDiff(onto, canceller).diff(before, after);
-    }
-
-    @Nullable
-    private static Buffer getStableIdBytes(NodeState state) {
-        if (state instanceof SegmentNodeState) {
-            return ((SegmentNodeState) state).getStableIdBytes();
-        } else {
-            return null;
-        }
-    }
-
-    private class CompactDiff implements NodeStateDiff {
-
-        @NotNull
-        private MemoryNodeBuilder builder;
-
-        @NotNull
-        private final NodeState base;
-
-        private final Canceller canceller;
-
-        @Nullable
-        private IOException exception;
-
-        private long modCount;
-
-        private void updated() throws IOException {
-            if (++modCount % UPDATE_LIMIT == 0) {
-                RecordId newBaseId = writer.writeNode(builder.getNodeState(), null);
-                SegmentNodeState newBase = new SegmentNodeState(reader, writer, blobStore, newBaseId);
-                builder = new MemoryNodeBuilder(newBase);
-            }
-        }
-
-        CompactDiff(@NotNull NodeState base, Canceller canceller) {
-            this.builder = new MemoryNodeBuilder(checkNotNull(base));
-            this.canceller = canceller;
-            this.base = base;
-        }
-
-        @Nullable
-        SegmentNodeState diff(@NotNull NodeState before, @NotNull NodeState after) throws IOException {
-            boolean success = after.compareAgainstBaseState(before, new CancelableDiff(this, () -> canceller.check().isCancelled()));
-            if (exception != null) {
-                throw new IOException(exception);
-            } else if (success) {
-                NodeState nodeState = builder.getNodeState();
-                checkState(modCount == 0 || !(nodeState instanceof SegmentNodeState));
-                RecordId nodeId = writer.writeNode(nodeState, getStableIdBytes(after));
-                compactionMonitor.onNode();
-                return new SegmentNodeState(reader, writer, blobStore, nodeId);
-            } else {
-                return null;
-            }
-        }
-
-        @Override
-        public boolean propertyAdded(@NotNull PropertyState after) {
-            builder.setProperty(compact(after));
-            return true;
-        }
-
-        @Override
-        public boolean propertyChanged(@NotNull PropertyState before, @NotNull PropertyState after) {
-            builder.setProperty(compact(after));
-            return true;
-        }
-
-        @Override
-        public boolean propertyDeleted(PropertyState before) {
-            builder.removeProperty(before.getName());
-            return true;
-        }
-
-        @Override
-        public boolean childNodeAdded(@NotNull String name, @NotNull NodeState after) {
-            try {
-                SegmentNodeState compacted = compact(after, canceller);
-                if (compacted != null) {
-                    updated();
-                    builder.setChildNode(name, compacted);
-                    return true;
-                } else {
-                    return false;
-                }
-            } catch (IOException e) {
-                exception = e;
-                return false;
-            }
-        }
-
-        @Override
-        public boolean childNodeChanged(@NotNull String name, @NotNull NodeState before, @NotNull NodeState after) {
-            try {
-                SegmentNodeState compacted = compact(before, after, base.getChildNode(name), canceller);
-                if (compacted != null) {
-                    updated();
-                    builder.setChildNode(name, compacted);
-                    return true;
-                } else {
-                    return false;
-                }
-            } catch (IOException e) {
-                exception = e;
-                return false;
-            }
-        }
-
-        @Override
-        public boolean childNodeDeleted(String name, NodeState before) {
-            try {
-                updated();
-                builder.getChildNode(name).remove();
-                return true;
-            } catch (IOException e) {
-                exception = e;
-                return false;
-            }
-        }
-    }
-
-    @NotNull
-    private PropertyState compact(@NotNull PropertyState property) {
-        compactionMonitor.onProperty();
-        String name = property.getName();
-        Type<?> type = property.getType();
-        if (type == BINARY) {
-            compactionMonitor.onBinary();
-            return binaryProperty(name, property.getValue(Type.BINARY));
-        } else if (type == BINARIES) {
-            List<Blob> blobs = newArrayList();
-            for (Blob blob : property.getValue(BINARIES)) {
-                compactionMonitor.onBinary();
-                blobs.add(blob);
-            }
-            return binaryPropertyFromBlob(name, blobs);
-        } else {
-            return createProperty(name, property.getValue(type), type);
-        }
-    }
+import java.io.IOException;
 
-}
+public interface Compactor {
+    SegmentNodeState compact(@NotNull NodeState before, @NotNull NodeState after, @NotNull NodeState onto,
+        Canceller canceller) throws IOException;
+}
\ No newline at end of file
diff --git oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/compaction/SegmentGCOptions.java oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/compaction/SegmentGCOptions.java
index 126db3e1ba..b979cfbd30 100644
--- oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/compaction/SegmentGCOptions.java
+++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/compaction/SegmentGCOptions.java
@@ -43,6 +43,21 @@
         TAIL
     }
 
+    /**
+     * The compactor type
+     */
+    public enum CompactorType {
+        /**
+         * Simple compactor implementation
+         */
+        CLASSIC_COMPACTOR,
+
+        /**
+         * Checkpoint-aware compaction implementation
+         */
+        CHECKPOINT_COMPACTOR
+    }
+
     /**
      * Default value for {@link #isPaused()}
     */
@@ -113,6 +128,8 @@
      */
     private long gcLogInterval = -1;
 
+    private CompactorType compactorType = CompactorType.CHECKPOINT_COMPACTOR;
+
     public SegmentGCOptions(boolean paused, int retryCount, int forceTimeout) {
         this.paused = paused;
         this.retryCount = retryCount;
@@ -235,7 +252,9 @@
         if (offline) {
             return getClass().getSimpleName() + "{" +
                     "offline=" + offline +
-                    ", retainedGenerations=" + retainedGenerations + "}";
+                    ", retainedGenerations=" + retainedGenerations +
+                    ", compactorType=" + compactorType +
+                    "}";
         } else {
             return getClass().getSimpleName() + "{" +
                     "paused=" + paused +
@@ -244,7 +263,9 @@
                     ", retryCount=" + retryCount +
                     ", forceTimeout=" + forceTimeout +
                     ", retainedGenerations=" + retainedGenerations +
-                    ", gcType=" + gcType + "}";
+                    ", gcType=" + gcType +
+                    ", compactorType=" + compactorType +
+                    "}";
         }
     }
 
@@ -341,4 +362,19 @@
         return gcLogInterval;
     }
 
+    /**
+     * @return the current compactor type (i.e. classic or checkpoint-aware)
+     */
+    public CompactorType getCompactorType() {
+        return compactorType;
+    }
+
+    /**
+     * Sets the compactor type to be used for compaction
+     * @param compactorType the compactor type
+     */
+    public SegmentGCOptions setCompactorType(CompactorType compactorType) {
+        this.compactorType = compactorType;
+        return this;
+    }
 }
diff --git oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractCompactionStrategy.java oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractCompactionStrategy.java
index bcd5ecccf0..90a179324e 100644
--- oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractCompactionStrategy.java
+++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractCompactionStrategy.java
@@ -27,19 +27,23 @@ import static org.apache.jackrabbit.oak.segment.compaction.SegmentGCStatus.COMPACTION_RETRY;
 import static org.apache.jackrabbit.oak.segment.file.TarRevisions.EXPEDITE_OPTION;
 import static org.apache.jackrabbit.oak.segment.file.TarRevisions.timeout;
 
-import java.io.IOException;
-
 import com.google.common.base.Function;
+
 import org.apache.jackrabbit.oak.segment.CheckpointCompactor;
+import org.apache.jackrabbit.oak.segment.ClassicCompactor;
+import org.apache.jackrabbit.oak.segment.Compactor;
 import org.apache.jackrabbit.oak.segment.RecordId;
 import org.apache.jackrabbit.oak.segment.SegmentNodeState;
 import org.apache.jackrabbit.oak.segment.SegmentWriter;
+import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.CompactorType;
 import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.GCType;
 import org.apache.jackrabbit.oak.segment.file.cancel.Cancellation;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
 import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 
+import java.io.IOException;
+
 abstract class AbstractCompactionStrategy implements CompactionStrategy {
 
     abstract GCType getCompactionType();
@@ -76,7 +80,7 @@ abstract class AbstractCompactionStrategy implements CompactionStrategy {
         Context context,
         NodeState base,
         NodeState onto,
-        CheckpointCompactor compactor,
+        Compactor compactor,
         Canceller canceller
     ) throws InterruptedException {
         RecordId compactedId = setHead(context, headId -> {
@@ -138,13 +142,7 @@
 
         Canceller compactionCanceller = context.getCanceller().withShortCircuit();
 
-        CheckpointCompactor compactor = new CheckpointCompactor(
-            context.getGCListener(),
-            context.getSegmentReader(),
-            writer,
-            context.getBlobStore(),
-            context.getCompactionMonitor()
-        );
+        Compactor compactor = newCompactor(context, writer);
 
         SegmentNodeState head = getHead(context);
         SegmentNodeState compacted = compactor.compact(base, head, base, compactionCanceller);
@@ -238,4 +236,18 @@
         }
     }
 
+    private Compactor newCompactor(Context context, SegmentWriter writer) {
+        CompactorType compactorType = context.getGCOptions().getCompactorType();
+        switch (compactorType) {
+            case CHECKPOINT_COMPACTOR:
+                return new CheckpointCompactor(context.getGCListener(), context.getSegmentReader(), writer,
+                    context.getBlobStore(), context.getCompactionMonitor());
+            case CLASSIC_COMPACTOR:
+                return new ClassicCompactor(context.getSegmentReader(), writer, context.getBlobStore(),
+                    context.getCompactionMonitor());
+            default:
+                throw new IllegalArgumentException("Unknown compactor type: " + compactorType);
+        }
+    }
+
 }
diff --git oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollectionStrategy.java oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollectionStrategy.java
index cbe946977a..84f011ad75 100644
--- oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollectionStrategy.java
+++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollectionStrategy.java
@@ -85,7 +85,6 @@
         FileStoreStats getFileStoreStats();
 
         SegmentReader getSegmentReader();
-
     }
 
     void collectGarbage(Context context) throws IOException;
diff --git oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollector.java oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollector.java
index e05572a8dc..ebd6e9ba3f 100644
--- oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollector.java
+++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollector.java
@@ -98,7 +98,6 @@ class GarbageCollector {
 
     private final GCNodeWriteMonitor compactionMonitor;
 
-
     /**
      * Timestamp of the last time full or tail compaction was successfully
     * invoked. 0 if never.
diff --git oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactorTest.java oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ClassicCompactorTest.java
similarity index 93%
rename from oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactorTest.java
rename to oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ClassicCompactorTest.java
index ccf3d4bd2c..edf5371175 100644
--- oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactorTest.java
+++ oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ClassicCompactorTest.java
@@ -55,7 +55,7 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
-public class CompactorTest {
+public class ClassicCompactorTest {
 
     @Rule
     public TemporaryFolder folder = new TemporaryFolder(new File("target"));
@@ -76,7 +76,7 @@
 
     @Test
     public void testCompact() throws Exception {
-        Compactor compactor = createCompactor(fileStore, null);
+        ClassicCompactor compactor = createCompactor(fileStore, null);
         addTestContent(nodeStore);
 
         SegmentNodeState uncompacted = (SegmentNodeState) nodeStore.getRoot();
@@ -97,8 +97,8 @@
 
     @Test
     public void testExceedUpdateLimit() throws Exception {
-        Compactor compactor = createCompactor(fileStore, null);
-        addNodes(nodeStore, Compactor.UPDATE_LIMIT * 2 + 1);
+        ClassicCompactor compactor = createCompactor(fileStore, null);
+        addNodes(nodeStore, ClassicCompactor.UPDATE_LIMIT * 2 + 1);
 
         SegmentNodeState uncompacted = (SegmentNodeState) nodeStore.getRoot();
         SegmentNodeState compacted = compactor.compact(uncompacted, Canceller.newCanceller());
@@ -110,7 +110,7 @@
 
     @Test
     public void testCancel() throws IOException, CommitFailedException {
-        Compactor compactor = createCompactor(fileStore, null);
+        ClassicCompactor compactor = createCompactor(fileStore, null);
         addTestContent(nodeStore);
         NodeBuilder builder = nodeStore.getRoot().builder();
         builder.setChildNode("cancel").setProperty("cancel", "cancel");
@@ -121,20 +121,20 @@
 
     @Test(expected = IOException.class)
     public void testIOException() throws IOException, CommitFailedException {
-        Compactor compactor = createCompactor(fileStore, "IOException");
+        ClassicCompactor compactor = createCompactor(fileStore, "IOException");
         addTestContent(nodeStore);
         compactor.compact(nodeStore.getRoot(), Canceller.newCanceller());
     }
 
     @NotNull
-    private static Compactor createCompactor(FileStore fileStore, String failOnName) {
+    private static ClassicCompactor createCompactor(FileStore fileStore, String failOnName) {
         SegmentWriter writer = defaultSegmentWriterBuilder("c")
             .withGeneration(newGCGeneration(1, 1, true))
             .build(fileStore);
         if (failOnName != null) {
             writer = new FailingSegmentWriter(writer, failOnName);
         }
-        return new Compactor(fileStore.getReader(), writer, fileStore.getBlobStore(), GCNodeWriteMonitor.EMPTY);
+        return new ClassicCompactor(fileStore.getReader(), writer, fileStore.getBlobStore(), GCNodeWriteMonitor.EMPTY);
     }
 
     private static void addNodes(SegmentNodeStore nodeStore, int count)
diff --git oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java
index 034bc22d3a..6b9a06fb0b 100644
--- oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java
+++ oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java
@@ -33,7 +33,7 @@ import static org.apache.commons.io.FileUtils.byteCountToDisplaySize;
 import static org.apache.jackrabbit.oak.api.Type.STRING;
 import static org.apache.jackrabbit.oak.commons.PathUtils.concat;
 import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
-import static org.apache.jackrabbit.oak.segment.Compactor.UPDATE_LIMIT;
+import static org.apache.jackrabbit.oak.segment.ClassicCompactor.UPDATE_LIMIT;
 import static org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.defaultGCOptions;
 import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;
 import static org.junit.Assert.assertEquals;