diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java index 5452b1f..8e63273 100644 --- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java +++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java @@ -49,6 +49,7 @@ import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -199,6 +200,8 @@ public class FileStore implements SegmentStore { */ private volatile boolean shutdown; + private static final AtomicLong runId = new AtomicLong(0); + /** * Create a new instance of a {@link Builder} for a file store. * @param directory directory where the tar files are stored @@ -518,7 +521,7 @@ public class FileStore implements SegmentStore { } public boolean maybeCompact(boolean cleanup) { - gcMonitor.info("TarMK compaction started"); + gcMonitor.info("TarMK GarbageCollection #{}: started", runId.incrementAndGet()); Runtime runtime = Runtime.getRuntime(); long avail = runtime.totalMemory() - runtime.freeMemory(); @@ -529,10 +532,12 @@ public class FileStore implements SegmentStore { long needed = delta * compactionStrategy.getMemoryThreshold(); if (needed >= avail) { gcMonitor.skipped( - "Not enough available memory {}, needed {}, last merge delta {}, so skipping compaction for now", - humanReadableByteCount(avail), - humanReadableByteCount(needed), - humanReadableByteCount(delta)); + "TarMK GarbageCollection #{}: not enough available memory {} ({} bytes), needed {} ({} bytes)," + + " last merge delta {} ({} bytes), so skipping compaction for now", + runId, + humanReadableByteCount(avail), avail, + humanReadableByteCount(needed), needed, + 
humanReadableByteCount(delta), delta); if (cleanup) { cleanupNeeded.set(true); } @@ -550,10 +555,11 @@ public class FileStore implements SegmentStore { byte gainThreshold = compactionStrategy.getGainThreshold(); boolean runCompaction = true; if (gainThreshold > 0) { + gcMonitor.info("TarMK GarbageCollection #{}: estimation started", runId); Supplier shutdown = newShutdownSignal(); CompactionGainEstimate estimate = estimateCompactionGain(shutdown); if (shutdown.get()) { - gcMonitor.info("Compaction estimation interrupted. Skipping compaction."); + gcMonitor.info("TarMK GarbageCollection #{}: estimation interrupted. Skipping compaction.", runId); return false; } @@ -561,24 +567,29 @@ public class FileStore implements SegmentStore { runCompaction = gain >= gainThreshold; if (runCompaction) { gcMonitor.info( - "Estimated compaction in {}, gain is {}% ({}/{}) or ({}/{}), so running compaction", - watch, gain, estimate.getReachableSize(), estimate.getTotalSize(), - humanReadableByteCount(estimate.getReachableSize()), humanReadableByteCount(estimate.getTotalSize())); + "TarMK GarbageCollection #{}: estimation completed in {} ({} ms). " + + "Gain is {}% or {}/{} ({}/{} bytes), so running compaction", + runId, watch, watch.elapsed(TimeUnit.MILLISECONDS), + gain, humanReadableByteCount(estimate.getReachableSize()), humanReadableByteCount(estimate.getTotalSize()), + estimate.getReachableSize(), estimate.getTotalSize()); } else { if (estimate.getTotalSize() == 0) { gcMonitor.skipped( - "Estimated compaction in {}. Skipping compaction for now as repository consists " + - "of a single tar file only", watch); + "TarMK GarbageCollection #{}: estimation completed in {} ({} ms). 
" + + "Skipping compaction for now as repository consists of a single tar file only", + runId, watch, watch.elapsed(TimeUnit.MILLISECONDS)); } else { gcMonitor.skipped( - "Estimated compaction in {}, gain is {}% ({}/{}) or ({}/{}), so skipping compaction for now", - watch, gain, estimate.getReachableSize(), estimate.getTotalSize(), - humanReadableByteCount(estimate.getReachableSize()), humanReadableByteCount(estimate.getTotalSize())); + "TarMK GarbageCollection #{}: estimation completed in {} ({} ms). " + + "Gain is {}% or {}/{} ({}/{} bytes), so skipping compaction for now", + runId, watch, watch.elapsed(TimeUnit.MILLISECONDS), + gain, humanReadableByteCount(estimate.getReachableSize()), humanReadableByteCount(estimate.getTotalSize()), + estimate.getReachableSize(), estimate.getTotalSize()); } } } else { - gcMonitor.info("Compaction estimation is skipped due to threshold value ({}). Running compaction", - gainThreshold); + gcMonitor.info("TarMK GarbageCollection #{}: estimation skipped due to gain threshold value ({}). Running compaction", + runId, gainThreshold); } if (runCompaction) { @@ -586,7 +597,7 @@ public class FileStore implements SegmentStore { compact(); compacted = true; } else { - gcMonitor.skipped("TarMK compaction paused"); + gcMonitor.skipped("TarMK GarbageCollection #{}: compaction paused", runId); } } if (cleanup) { @@ -774,8 +785,8 @@ public class FileStore implements SegmentStore { Map cleaned = newLinkedHashMap(); synchronized (this) { - gcMonitor.info("TarMK revision cleanup started. Current repository size {}", - humanReadableByteCount(initialSize)); + gcMonitor.info("TarMK GarbageCollection #{}: cleanup started. 
Current repository size is {} ({} bytes)", + runId, humanReadableByteCount(initialSize), initialSize); newWriter(); tracker.clearCache(); @@ -801,7 +812,7 @@ for (TarReader reader : cleaned.keySet()) { cleaned.put(reader, reader.cleanup(referencedIds, cm, cleanedIds)); if (shutdown) { - gcMonitor.info("TarMK revision cleanup interrupted"); + gcMonitor.info("TarMK GarbageCollection #{}: cleanup interrupted", runId); break; } } @@ -832,7 +843,7 @@ for (TarReader oldReader : oldReaders) { closeAndLogOnFail(oldReader); File file = oldReader.getFile(); - gcMonitor.info("TarMK revision cleanup marking file for deletion: {}", file.getName()); + gcMonitor.info("TarMK GarbageCollection #{}: cleanup marking file for deletion: {}", runId, file.getName()); toRemove.addLast(file); } @@ -840,12 +851,13 @@ long finalSize = size(); approximateSize.set(finalSize); gcMonitor.cleaned(initialSize - finalSize, finalSize); - gcMonitor.info("TarMK revision cleanup completed in {}. Post cleanup size is {} " + - "and space reclaimed {}. Compaction map weight/depth is {}/{}.", watch, - humanReadableByteCount(finalSize), - humanReadableByteCount(initialSize - finalSize), - humanReadableByteCount(sum(cm.getEstimatedWeights())), - cm.getDepth()); + gcMonitor.info("TarMK GarbageCollection #{}: cleanup completed in {} ({} ms). Post cleanup size is {} ({} bytes)" + + " and space reclaimed {} ({} bytes). 
Compaction map weight/depth is {}/{} ({} bytes/{}).", + runId, watch, watch.elapsed(TimeUnit.MILLISECONDS), + humanReadableByteCount(finalSize), finalSize, + humanReadableByteCount(initialSize - finalSize), initialSize - finalSize, + humanReadableByteCount(sum(cm.getEstimatedWeights())), cm.getDepth(), + sum(cm.getEstimatedWeights()), cm.getDepth()); return toRemove; } @@ -921,8 +933,8 @@ public class FileStore implements SegmentStore { public void compact() { checkArgument(!compactionStrategy.equals(NO_COMPACTION), "You must set a compactionStrategy before calling compact"); - gcMonitor.info("TarMK compaction running, strategy={}", compactionStrategy); - long start = System.currentTimeMillis(); + gcMonitor.info("TarMK GarbageCollection #{}: compaction started, strategy={}", runId, compactionStrategy); + Stopwatch watch = Stopwatch.createStarted(); Supplier compactionCanceled = newCancelCompactionCondition(); Compactor compactor = new Compactor(this, compactionStrategy, compactionCanceled); SegmentNodeState before = getHead(); @@ -930,14 +942,14 @@ public class FileStore implements SegmentStore { .getChildNodeCount(Long.MAX_VALUE); if (existing > 1) { gcMonitor.warn( - "TarMK compaction found {} checkpoints, you might need to run checkpoint cleanup", - existing); + "TarMK GarbageCollection #{}: compaction found {} checkpoints, you might need to run checkpoint cleanup", + runId, existing); } SegmentNodeState after = compactor.compact(EMPTY_NODE, before, EMPTY_NODE); if (compactionCanceled.get()) { - gcMonitor.warn("TarMK compaction was canceled: {}", compactionCanceled); + gcMonitor.warn("TarMK GarbageCollection #{}: compaction canceled: {}", runId, compactionCanceled); return; } @@ -950,13 +962,13 @@ public class FileStore implements SegmentStore { // Some other concurrent changes have been made. // Rebase (and compact) those changes on top of the // compacted state before retrying to set the head. 
- gcMonitor.info("TarMK compaction detected concurrent commits while compacting. " + - "Compacting these commits. Cycle {}", cycles); + gcMonitor.info("TarMK GarbageCollection #{}: compaction detected concurrent commits while compacting. " + + "Compacting these commits. Cycle {}", runId, cycles); SegmentNodeState head = getHead(); after = compactor.compact(before, head, after); if (compactionCanceled.get()) { - gcMonitor.warn("TarMK compaction was canceled: {}", compactionCanceled); + gcMonitor.warn("TarMK GarbageCollection #{}: compaction canceled: {}", runId, compactionCanceled); return; } @@ -964,21 +976,21 @@ public class FileStore implements SegmentStore { setHead = new SetHead(head, after, compactor); } if (!success) { - gcMonitor.info("TarMK compaction gave up compacting concurrent commits after " + - "{} cycles.", cycles - 1); + gcMonitor.info("TarMK GarbageCollection #{}: compaction gave up compacting concurrent commits after {} cycles.", + runId, cycles - 1); if (compactionStrategy.getForceAfterFail()) { - gcMonitor.info("TarMK compaction force compacting remaining commits"); + gcMonitor.info("TarMK GarbageCollection #{}: compaction force compacting remaining commits", runId); if (!forceCompact(before, after, compactor)) { - gcMonitor.warn("TarMK compaction failed to force compact remaining commits. " + - "Most likely compaction didn't get exclusive access to the store."); + gcMonitor.warn("TarMK GarbageCollection #{}: compaction failed to force compact remaining commits. 
" + + "Most likely compaction didn't get exclusive access to the store.", runId); } } } - gcMonitor.info("TarMK compaction completed after {} cycles in {}ms", - cycles - 1, System.currentTimeMillis() - start); + gcMonitor.info("TarMK GarbageCollection #{}: compaction completed in {} ({} ms), after {} cycles", + runId, watch, watch.elapsed(TimeUnit.MILLISECONDS), cycles - 1); } catch (Exception e) { - gcMonitor.error("Error while running TarMK compaction", e); + gcMonitor.error("TarMK GarbageCollection #" + runId + ": compaction encountered an error", e); } }