diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Record.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Record.java
index ec8fbfd..da7aacb 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Record.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Record.java
@@ -38,12 +38,12 @@ class Record {
     /**
      * Identifier of the segment that contains this record.
      */
-    private final SegmentId segmentId;
+    private volatile SegmentId segmentId;
 
     /**
      * Segment offset of this record.
      */
-    private final int offset;
+    private volatile int offset;
 
     /**
      * Creates a new object for the identified record.
@@ -123,6 +123,15 @@ class Record {
         return getOffset(bytes + ids * Segment.RECORD_ID_BYTES);
     }
 
+    protected void resetRecordId(RecordId newId){
+        this.segmentId = newId.getSegmentId();
+        this.offset = newId.getOffset();
+    }
+
+    protected SegmentId getSegmentId(){
+        return segmentId;
+    }
+
     //------------------------------------------------------------< Object >--
 
     @Override
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentId.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentId.java
index 7ed19a0..9e04235 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentId.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentId.java
@@ -46,6 +46,8 @@ public class SegmentId implements Comparable<SegmentId> {
 
     private final long lsb;
 
+    private final long creationTime;
+
     /**
      * A reference to the segment object, if it is available in memory. It is
      * used for fast lookup. The segment tracker will set or reset this field.
@@ -53,15 +55,18 @@ public class SegmentId implements Comparable<SegmentId> {
     // TODO: possibly we could remove the volatile
     private volatile Segment segment;
 
-    public SegmentId(SegmentTracker tracker, long msb, long lsb, Segment segment) {
+    private volatile boolean stale;
+
+    public SegmentId(SegmentTracker tracker, long msb, long lsb, Segment segment, long birthTime) {
         this.tracker = tracker;
         this.msb = msb;
         this.lsb = lsb;
         this.segment = segment;
+        this.creationTime = birthTime;
     }
 
     public SegmentId(SegmentTracker tracker, long msb, long lsb) {
-        this(tracker, msb, lsb, null);
+        this(tracker, msb, lsb, null, System.currentTimeMillis());
     }
 
     /**
@@ -102,6 +107,7 @@ public class SegmentId implements Comparable<SegmentId> {
                 if (segment == null) {
                     log.debug("Loading segment {}", this);
                     segment = tracker.getSegment(this);
+                    stale = false;
                 }
             }
         }
@@ -117,6 +123,20 @@ public class SegmentId implements Comparable<SegmentId> {
         return tracker;
     }
 
+    public long getCreationTime() {
+        return creationTime;
+    }
+
+    public synchronized void markStale(){
+        this.segment = null;
+        this.stale = true;
+    }
+
+    public boolean isStale(){
+        return stale;
+    }
+
+
     //--------------------------------------------------------< Comparable >--
 
     @Override
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTable.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTable.java
index 6c14a5e..69ec725 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTable.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTable.java
@@ -76,7 +76,7 @@ public class SegmentIdTable {
             reference = references.get(index);
         }
 
-        SegmentId id = new SegmentId(tracker, msb, lsb);
+        SegmentId id = new SegmentId(tracker, msb, lsb, null, tracker.getClock().getTime());
         references.set(index, new WeakReference<SegmentId>(id));
         if (index != first) {
             refresh();
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java
index 7fedb1b..6c716f9 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java
@@ -22,14 +22,20 @@ import java.util.List;
 import javax.annotation.CheckForNull;
 import javax.annotation.Nonnull;
 
+import com.google.common.base.Function;
+import com.google.common.collect.Iterables;
 import org.apache.jackrabbit.oak.api.PropertyState;
 import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.commons.PathUtils;
 import org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState;
 import org.apache.jackrabbit.oak.plugins.memory.MemoryChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.AbstractChildNodeEntry;
 import org.apache.jackrabbit.oak.spi.state.AbstractNodeState;
 import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.apache.jackrabbit.oak.spi.state.NodeStateDiff;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkNotNull;
@@ -53,16 +59,28 @@ import static org.apache.jackrabbit.oak.spi.state.AbstractNodeState.checkValidNa
  * currently doesn't cache data (but the template is fully loaded).
  */
 public class SegmentNodeState extends Record implements NodeState {
+    private static final Logger log = LoggerFactory.getLogger(SegmentNodeState.class);
 
     private volatile RecordId templateId = null;
 
     private volatile Template template = null;
 
+    private final String path;
+
     public SegmentNodeState(RecordId id) {
+        this(id, null);
+    }
+
+    public SegmentNodeState(RecordId id, String path) {
         super(id);
+        this.path = path;
     }
 
     RecordId getTemplateId() {
+        if (templateId != null && getSegmentId().isStale()) {
+            templateId = null;
+        }
+
         if (templateId == null) {
             // no problem if updated concurrently,
             // as each concurrent thread will just get the same value
@@ -72,6 +90,11 @@ public class SegmentNodeState extends Record implements NodeState {
     }
 
     Template getTemplate() {
+        if (template != null && getSegmentId().isStale()) {
+            templateId = null;
+            template = null;
+        }
+
         if (template == null) {
             // no problem if updated concurrently,
             // as each concurrent thread will just get the same value
@@ -343,13 +366,13 @@ public class SegmentNodeState extends Record implements NodeState {
         if (childName == Template.MANY_CHILD_NODES) {
             MapEntry child = getChildNodeMap().getEntry(name);
             if (child != null) {
-                return child.getNodeState();
+                return toNodeState(child);
             }
         } else if (childName != Template.ZERO_CHILD_NODES
                 && childName.equals(name)) {
             Segment segment = getSegment();
             RecordId childNodeId = segment.readRecordId(getOffset(0, 1));
-            return new SegmentNodeState(childNodeId);
+            return new SegmentNodeState(childNodeId, getChildPath(name));
         }
         checkValidName(name);
         return MISSING_NODE;
@@ -373,12 +396,29 @@ public class SegmentNodeState extends Record implements NodeState {
         if (childName == Template.ZERO_CHILD_NODES) {
             return Collections.emptyList();
         } else if (childName == Template.MANY_CHILD_NODES) {
-            return getChildNodeMap().getEntries();
+            return Iterables.transform(getChildNodeMap().getEntries(), new Function<MapEntry, ChildNodeEntry>() {
+                @Override
+                public ChildNodeEntry apply(final MapEntry input) {
+                    return new AbstractChildNodeEntry() {
+                        @Nonnull
+                        @Override
+                        public String getName() {
+                            return input.getName();
+                        }
+
+                        @Nonnull
+                        @Override
+                        public NodeState getNodeState() {
+                            return toNodeState(input);
+                        }
+                    };
+                }
+            });
         } else {
             Segment segment = getSegment();
             RecordId childNodeId = segment.readRecordId(getOffset(0, 1));
             return Collections.singletonList(new MemoryChildNodeEntry(
-                    childName, new SegmentNodeState(childNodeId)));
+                    childName, new SegmentNodeState(childNodeId, getChildPath(childName))));
         }
     }
 
@@ -547,6 +587,51 @@ public class SegmentNodeState extends Record implements NodeState {
         return true;
     }
 
+    @Override
+    protected Segment getSegment() {
+        try {
+            return super.getSegment();
+        } catch (SegmentNotFoundException e) {
+            if (path != null) {
+                SegmentNodeState se = locateUpdatedState();
+                if (se != null) {
+                    resetRecordId(se.getRecordId());
+                    this.templateId = null;
+                    this.template = null;
+                    log.info("Relinking done for {}", path);
+                    return super.getSegment();
+                }
+            }
+            throw e;
+        }
+    }
+
+    private SegmentNodeState locateUpdatedState() {
+        //TODO This access differs slightly from SegmentNodeStore and might
+        //fetch a more recent head than the one SegmentNodeStore would expose
+        NodeState state = getTracker().getStore().getHead().getChildNode(SegmentNodeStore.ROOT);
+        for (String element : PathUtils.elements(path)) {
+            state = state.getChildNode(element);
+        }
+        if (state instanceof SegmentNodeState) {
+            return (SegmentNodeState) state;
+        }
+        return null;
+    }
+
+    private SegmentNodeState toNodeState(MapEntry me) {
+        return new SegmentNodeState(me.getValue(), getChildPath(me.getName()));
+    }
+
+    private String getChildPath(String name) {
+        if (path != null) {
+            return PathUtils.concat(path, name);
+        } else if (SegmentNodeStore.ROOT.equals(name)) {
+            return "/";
+        }
+        return null;
+    }
+
     private static boolean compareProperties(
             PropertyState before, PropertyState after, NodeStateDiff diff) {
         if (before == null) {
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentTracker.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentTracker.java
index 5798188..5795dfd 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentTracker.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentTracker.java
@@ -25,11 +25,12 @@ import java.security.SecureRandom;
 import java.util.LinkedList;
 import java.util.Queue;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
 
 import javax.annotation.Nonnull;
 
 import org.apache.jackrabbit.oak.plugins.blob.ReferenceCollector;
+import org.apache.jackrabbit.oak.stats.Clock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -91,9 +93,12 @@ public class SegmentTracker {
 
     private final LinkedList<Segment> segments = newLinkedList();
 
+    private final Clock clock;
+
     private long currentSize = 0;
 
-    public SegmentTracker(SegmentStore store, int cacheSizeMB) {
+    public SegmentTracker(SegmentStore store, int cacheSizeMB, Clock clock) {
+        this.clock = clock;
         for (int i = 0; i < tables.length; i++) {
             tables[i] = new SegmentIdTable(this);
         }
@@ -103,8 +108,12 @@ public class SegmentTracker {
         this.cacheSize = cacheSizeMB * MB;
     }
 
+    public SegmentTracker(SegmentStore store, Clock clock) {
+        this(store, DEFAULT_MEMORY_CACHE_SIZE, clock);
+    }
+
     public SegmentTracker(SegmentStore store) {
-        this(store, DEFAULT_MEMORY_CACHE_SIZE);
+        this(store, DEFAULT_MEMORY_CACHE_SIZE, Clock.SIMPLE);
     }
 
     public SegmentWriter getWriter() {
@@ -160,6 +169,11 @@ public class SegmentTracker {
         compactionMap.set(compaction);
     }
 
+    public synchronized void dropCache() {
+        segments.clear();
+        currentSize = 0;
+    }
+
     @Nonnull
     CompactionMap getCompactionMap() {
         return compactionMap.get();
@@ -220,6 +234,10 @@ public class SegmentTracker {
         return tables[index].getSegmentId(msb, lsb);
     }
 
+    public Clock getClock() {
+        return clock;
+    }
+
     SegmentId newDataSegmentId() {
         return newSegmentId(DATA);
     }
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java
index c332db4..eb7adb1 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java
@@ -36,6 +36,7 @@ import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileLock;
 import java.util.Arrays;
+import java.util.Date;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
@@ -43,6 +44,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Matcher;
@@ -65,6 +67,7 @@ import org.apache.jackrabbit.oak.plugins.segment.SegmentWriter;
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.stats.Clock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -152,7 +155,7 @@ public class FileStore implements SegmentStore {
 
     public FileStore(BlobStore blobStore, File directory, int maxFileSizeMB, boolean memoryMapping)
             throws IOException {
-        this(blobStore, directory, EMPTY_NODE, maxFileSizeMB, 0, memoryMapping);
+        this(blobStore, directory, EMPTY_NODE, maxFileSizeMB, 0, memoryMapping, Clock.SIMPLE);
     }
 
     public FileStore(File directory, int maxFileSizeMB, boolean memoryMapping)
@@ -167,18 +170,18 @@ public class FileStore implements SegmentStore {
 
     public FileStore(File directory, int maxFileSizeMB, int cacheSizeMB,
             boolean memoryMapping) throws IOException {
-        this(null, directory, EMPTY_NODE, maxFileSizeMB, cacheSizeMB, memoryMapping);
+        this(null, directory, EMPTY_NODE, maxFileSizeMB, cacheSizeMB, memoryMapping, Clock.SIMPLE);
     }
 
     public FileStore(
             BlobStore blobStore, final File directory, NodeState initial,
-            int maxFileSizeMB, int cacheSizeMB, boolean memoryMapping)
+            int maxFileSizeMB, int cacheSizeMB, boolean memoryMapping, Clock clock)
             throws IOException {
         checkNotNull(directory).mkdirs();
         if (cacheSizeMB > 0) {
-            this.tracker = new SegmentTracker(this, cacheSizeMB);
+            this.tracker = new SegmentTracker(this, cacheSizeMB, clock);
         } else {
-            this.tracker = new SegmentTracker(this);
+            this.tracker = new SegmentTracker(this, clock);
         }
         this.blobStore = blobStore;
         this.directory = directory;
@@ -437,6 +440,10 @@ public class FileStore implements SegmentStore {
         }
     }
 
+    public synchronized void cleanup() throws IOException {
+        this.cleanup(tracker.getClock().getTime() - TimeUnit.MINUTES.toMillis(2));
+    }
+
     /**
      * Runs garbage collection on the segment level, which could write new
      * generations of tar files. It checks which segments are still reachable,
@@ -445,7 +452,7 @@ public class FileStore implements SegmentStore {
      * A new generation of a tar file is created (and segments are only
      * discarded) if doing so releases more than 25% of the space in a tar file.
      */
-    public synchronized void cleanup() throws IOException {
+    public synchronized void cleanup(long lastCreationTime) throws IOException {
         Stopwatch watch = Stopwatch.createStarted();
         long initialSize = size();
         log.info("TarMK revision cleanup started. Current repository size {}",
@@ -453,10 +460,23 @@ public class FileStore implements SegmentStore {
 
         // Suggest to the JVM that now would be a good time
         // to clear stale weak references in the SegmentTracker
+        getTracker().dropCache();
         System.gc();
 
+        if (lastCreationTime > 0) {
+            log.info("SegmentIds older than {} will not be considered while determining root set",
+                    new Date(lastCreationTime));
+        }
+
+        final SegmentId headId = getHead().getRecordId().getSegmentId();
         Set<UUID> ids = newHashSet();
         for (SegmentId id : tracker.getReferencedSegmentIds()) {
+            if(lastCreationTime > 0
+                    && id.getCreationTime() < lastCreationTime
+                    && id != headId){
+                id.markStale();
+                continue;
+            }
             ids.add(new UUID(
                     id.getMostSignificantBits(),
                     id.getLeastSignificantBits()));
diff --git oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/CompactionAndCleanupTest.java oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/CompactionAndCleanupTest.java
index 90d4f11..80e262a 100644
--- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/CompactionAndCleanupTest.java
+++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/CompactionAndCleanupTest.java
@@ -33,17 +33,20 @@ import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import org.apache.jackrabbit.oak.stats.Clock;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 
+import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 public class CompactionAndCleanupTest {
 
     private File directory;
+    public static final int MB = 1024 * 1024;
 
     @Before
     public void setUp() throws IOException {
@@ -54,7 +57,6 @@ public class CompactionAndCleanupTest {
     }
 
     @Test
-    @Ignore("OAK-2045")
     public void compactionAndWeakReferenceMagic() throws Exception{
         final int MB = 1024 * 1024;
         final int blobSize = 5 * MB;
@@ -118,6 +120,74 @@ public class CompactionAndCleanupTest {
         assertEquals(mb(fileStore.size()), mb(blobSize));
     }
 
+    @Test
+    public void compactionAndAgeing() throws Exception{
+        final int blobSize = 5 * MB;
+
+        Clock clock = new Clock.Virtual();
+        FileStore fileStore = new FileStore(null, directory, EMPTY_NODE, 1, 1, false, clock);
+        SegmentNodeStore nodeStore = new SegmentNodeStore(fileStore);
+
+        //1. Create a property with 5 MB blob
+        NodeBuilder builder = nodeStore.getRoot().builder();
+        builder.setProperty("a1", createBlob(nodeStore, blobSize));
+        builder.setProperty("b", "foo");
+        builder.child("bar").setProperty("name", "batman");
+
+        //Keep a reference to this nodeState to simulate long
+        //running session
+        NodeState ns1 = nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+        NodeState ns2 = ns1.getChildNode("bar");
+        long removalTime = clock.getTime() + 1;
+        clock.waitUntil(removalTime);
+
+        System.out.printf("File store pre removal %d%n", mb(fileStore.size()));
+        assertEquals(mb(fileStore.size()), mb(blobSize));
+        assertEquals("foo", ns1.getString("b"));
+
+
+        //2. Now remove the property and update the name
+        builder = nodeStore.getRoot().builder();
+        builder.removeProperty("a1");
+        builder.child("bar").setProperty("name", "joker");
+        builder.child("city").setProperty("name", "gotham");
+        builder.child("foo").child("bar").setProperty("name", "batwoman");
+        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+
+        //Size remains same
+        System.out.printf("File store pre compaction %d%n", mb(fileStore.size()));
+        assertEquals(mb(fileStore.size()), mb(blobSize));
+
+        //3. Compact
+        fileStore.compact();
+
+        //Size still remains same
+        System.out.printf("File store post compaction %d%n", mb(fileStore.size()));
+        assertEquals(mb(fileStore.size()), mb(blobSize));
+
+        //4. Add some more property to flush the current TarWriter
+        builder = nodeStore.getRoot().builder();
+        builder.setProperty("a2", createBlob(nodeStore, blobSize));
+        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+
+        //Size is double
+        System.out.printf("File store pre cleanup %d%n", mb(fileStore.size()));
+        assertEquals(mb(fileStore.size()), 2 * mb(blobSize));
+
+        //5. Cleanup. Ignore references with time earlier than removal time
+        fileStore.cleanup(removalTime);
+
+        //Size should now come back to 5 and deleted data
+        //space reclaimed
+        System.out.printf("File store post cleanup and nullification %d%n", mb(fileStore.size()));
+        assertEquals(mb(fileStore.size()), mb(blobSize));
+
+        //Old state should be linked to current head now
+        assertEquals("joker", ns1.getChildNode("bar").getString("name"));
+        assertEquals("gotham", ns1.getChildNode("city").getString("name"));
+        assertEquals("joker", ns2.getString("name"));
+    }
+
     @After
     public void cleanDir() throws IOException {
         FileUtils.deleteDirectory(directory);
@@ -126,7 +196,7 @@ public class CompactionAndCleanupTest {
     private static void cleanup(FileStore fileStore) throws IOException {
         fileStore.getTracker().setCompactionMap(new Compactor(null).getCompactionMap());
         fileStore.getTracker().getWriter().dropCache();
-
+        fileStore.getTracker().dropCache();
         fileStore.cleanup();
     }
 
@@ -142,7 +212,6 @@ public class CompactionAndCleanupTest {
 
     @Test
     public void testGainEstimator() throws Exception {
-        final int MB = 1024 * 1024;
         final int blobSize = 2 * MB;
 
         FileStore fileStore = new FileStore(directory, 2, false);
diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java
index 377fbc9..1759f71 100644
--- oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java
+++ oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java
@@ -36,6 +36,7 @@ import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
 import org.apache.jackrabbit.oak.plugins.segment.SegmentStore;
 import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
+import org.apache.jackrabbit.oak.stats.Clock;
 
 public abstract class OakFixture {
 
@@ -385,7 +386,7 @@ public abstract class OakFixture {
                 stores[i] = new FileStore(blobStore,
                         new File(base, unique),
                         EmptyNodeState.EMPTY_NODE,
-                        maxFileSizeMB, cacheSizeMB, memoryMapping);
+                        maxFileSizeMB, cacheSizeMB, memoryMapping, Clock.SIMPLE);
                 cluster[i] = new Oak(new SegmentNodeStore(stores[i]));
             }
             return cluster;
