diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
index d815562..3b5ecab 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
@@ -18,6 +18,7 @@ package org.apache.jackrabbit.oak.plugins.document;
 
 import java.io.InputStream;
 import java.util.concurrent.Executor;
+import java.util.concurrent.TimeUnit;
 
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
@@ -463,6 +464,7 @@ public class DocumentMK implements MicroKernel {
         private boolean useSimpleRevision;
         private long splitDocumentAgeMillis = 5 * 60 * 1000;
         private long offHeapCacheSize = -1;
+        private long maxReplicationLagMillis = TimeUnit.HOURS.toMillis(6);
         private Clock clock = Clock.SIMPLE;
         private Executor executor;
 
@@ -730,6 +732,15 @@ public class DocumentMK implements MicroKernel {
             return clock;
         }
 
+        public Builder setMaxReplicationLag(long duration, TimeUnit unit){
+            maxReplicationLagMillis = unit.toMillis(duration);
+            return this;
+        }
+
+        public long getMaxReplicationLagMillis() {
+            return maxReplicationLagMillis;
+        }
+
         /**
          * Open the DocumentMK instance using the configured options.
          *
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
index 5e16cc4..db1a41c 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
@@ -25,7 +25,6 @@ import java.util.ArrayList;
 import java.util.Dictionary;
 import java.util.Hashtable;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
@@ -153,6 +152,9 @@ public class DocumentNodeStoreService {
     public static final String PROP_BLOB_GC_MAX_AGE = "blobGcMaxAgeInSecs";
     private long blobGcMaxAgeInSecs = DEFAULT_BLOB_GC_MAX_AGE;
 
+    private static final long DEFAULT_MAX_REPLICATION_LAG = TimeUnit.HOURS.toSeconds(6);
+    public static final String PROP_REPLICATION_LAG = "maxReplicationLagInSecs";
+    private long maxReplicationLagInSecs = DEFAULT_MAX_REPLICATION_LAG;
 
     @Activate
     protected void activate(ComponentContext context, Map<String, ?> config) throws Exception {
@@ -160,6 +162,8 @@ public class DocumentNodeStoreService {
         this.whiteboard = new OsgiWhiteboard(context.getBundleContext());
         this.executor = new WhiteboardExecutor();
         executor.start(whiteboard);
+        this.maxReplicationLagInSecs = PropertiesUtil.toLong(config.get(PROP_REPLICATION_LAG),
+                DEFAULT_MAX_REPLICATION_LAG);
 
         if (blobStore == null &&
                 PropertiesUtil.toBoolean(prop(CUSTOM_BLOB_STORE), false)) {
@@ -234,14 +238,16 @@ public class DocumentNodeStoreService {
                 // Take care around not logging the uri directly as it
                 // might contain passwords
                 String type = useMK ? "MK" : "NodeStore";
-                log.info("Starting Document{} with host={}, db={}, cache size (MB)={}, Off Heap Cache size (MB)={}, 'changes' collection size (MB)={}",
-                        type, mongoURI.getHosts(), db, cacheSize, offHeapCache, changesSize);
+                log.info("Starting Document{} with host={}, db={}, cache size (MB)={}, Off Heap Cache size (MB)={}, " +
+                                "'changes' collection size (MB)={}, maxReplicationLagInSecs={}",
+                        type, mongoURI.getHosts(), db, cacheSize, offHeapCache, changesSize, maxReplicationLagInSecs);
                 log.info("Mongo Connection details {}", MongoConnection.toString(mongoURI.getOptions()));
             }
 
             MongoClient client = new MongoClient(mongoURI);
             DB mongoDB = client.getDB(db);
 
+            mkBuilder.setMaxReplicationLag(maxReplicationLagInSecs, TimeUnit.SECONDS);
             mkBuilder.setMongoDB(mongoDB, changesSize);
 
             log.info("Connected to database {}", mongoDB);
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index 920cb9c..2a7b152 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -1056,7 +1056,7 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
             return Collections.emptyList();
         }
         if (revision == null) {
-            return new PropertyHistory(store, this, property);
+            return new PropertyHistory(this, property);
         } else {
             final String mainPath = getMainPath();
             // first try to lookup revision directly
@@ -1065,7 +1065,7 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
                 Revision r = entry.getKey();
                 int h = entry.getValue().height;
                 String prevId = Utils.getPreviousIdFor(mainPath, r, h);
-                NodeDocument prev = store.find(Collection.NODES, prevId);
+                NodeDocument prev = getPreviousDocument(prevId);
                 if (prev != null) {
                     if (prev.getValueMap(property).containsKey(revision)) {
                         return Collections.singleton(prev);
@@ -1094,6 +1094,12 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
         }
     }
 
+    NodeDocument getPreviousDocument(String prevId){
+        //Use the maxAge variant so that, in the case of Mongo, calls for
+        //previous documents are directed towards replicas first
+        return store.find(Collection.NODES, prevId, Integer.MAX_VALUE);
+    }
+
     @Nonnull
     Iterator<NodeDocument> getAllPreviousDocs() {
         if (getPreviousRanges().isEmpty()) {
@@ -1123,9 +1129,7 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
     private NodeDocument getPreviousDoc(Revision rev, Range range){
         int h = range.height;
         String prevId = Utils.getPreviousIdFor(getMainPath(), rev, h);
-        //TODO Use the maxAge variant such that in case of Mongo call for
-        //previous doc are directed towards replicas first
-        NodeDocument prev = store.find(Collection.NODES, prevId);
+        NodeDocument prev = getPreviousDocument(prevId);
         if (prev != null) {
             return prev;
         } else {
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/PropertyHistory.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/PropertyHistory.java
index 796b594..b5ee216 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/PropertyHistory.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/PropertyHistory.java
@@ -45,16 +45,13 @@ class PropertyHistory implements Iterable<NodeDocument> {
 
     private static final Logger LOG = LoggerFactory.getLogger(PropertyHistory.class);
 
-    private final DocumentStore store;
     private final NodeDocument doc;
     private final String property;
     // path of the main document
     private final String mainPath;
 
-    public PropertyHistory(@Nonnull DocumentStore store,
-                           @Nonnull NodeDocument doc,
+    public PropertyHistory(@Nonnull NodeDocument doc,
                            @Nonnull String property) {
-        this.store = checkNotNull(store);
         this.doc = checkNotNull(doc);
         this.property = checkNotNull(property);
         this.mainPath = doc.getMainPath();
@@ -70,7 +67,7 @@ class PropertyHistory implements Iterable<NodeDocument> {
                 Revision r = input.getKey();
                 int h = input.getValue().height;
                 String prevId = Utils.getPreviousIdFor(mainPath, r, h);
-                NodeDocument prev = store.find(Collection.NODES, prevId);
+                NodeDocument prev = doc.getPreviousDocument(prevId);
                 if (prev == null) {
                     LOG.warn("Document with previous revisions not found: " + prevId);
                     return null;
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/CacheInvalidator.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/CacheInvalidator.java
index 773e73f..6b55abd 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/CacheInvalidator.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/CacheInvalidator.java
@@ -37,6 +37,7 @@ import com.mongodb.DBCursor;
 import com.mongodb.DBObject;
 import com.mongodb.QueryBuilder;
 
+import com.mongodb.ReadPreference;
 import org.apache.jackrabbit.oak.cache.CacheValue;
 import org.apache.jackrabbit.oak.commons.PathUtils;
 import org.apache.jackrabbit.oak.plugins.document.CachedNodeDocument;
@@ -135,6 +136,7 @@ abstract class CacheInvalidator {
 
             // Fetch lastRev for each such node
             DBCursor cursor = nodes.find(query.get(), keys);
+            cursor.setReadPreference(ReadPreference.primary());
             result.queryCount++;
 
             for (DBObject obj : cursor) {
@@ -222,6 +224,7 @@ abstract class CacheInvalidator {
 
                         // Fetch lastRev and modCount for each such nodes
                         DBCursor cursor = nodes.find(query.get(), keys);
+                        cursor.setReadPreference(ReadPreference.primary());
                         LOG.debug(
                                 "Checking for changed nodes at level {} with {} paths",
                                 tn.level(), sameLevelNodes.size());
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoBlobReferenceIterator.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoBlobReferenceIterator.java
index ee10ef6..e717638 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoBlobReferenceIterator.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoBlobReferenceIterator.java
@@ -28,7 +28,6 @@ import com.mongodb.DBCollection;
 import com.mongodb.DBCursor;
 import com.mongodb.DBObject;
 import com.mongodb.QueryBuilder;
-import com.mongodb.ReadPreference;
 import org.apache.jackrabbit.oak.api.Blob;
 import org.apache.jackrabbit.oak.plugins.document.BlobCollector;
 import org.apache.jackrabbit.oak.plugins.document.Collection;
@@ -78,7 +77,8 @@ public class MongoBlobReferenceIterator extends AbstractIterator<Blob> implement
                     .is(NodeDocument.HAS_BINARY_VAL)
                     .get();
             //TODO It currently prefers secondary. Would that be Ok?
-            cursor = getNodeCollection().find(query).setReadPreference(ReadPreference.secondaryPreferred());
+            cursor = getNodeCollection().find(query)
+                    .setReadPreference(documentStore.getConfiguredReadPreference(Collection.NODES));
         }
     }
 
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
index 229c353..485674e 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
@@ -34,9 +34,9 @@ import javax.annotation.CheckForNull;
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 
-import com.google.common.base.Splitter;
-
 import com.google.common.collect.Lists;
+import com.mongodb.MongoClientURI;
+import com.mongodb.ReadPreference;
 import org.apache.jackrabbit.mk.api.MicroKernelException;
 import org.apache.jackrabbit.oak.cache.CacheStats;
 import org.apache.jackrabbit.oak.cache.CacheValue;
@@ -56,6 +56,8 @@ import org.apache.jackrabbit.oak.plugins.document.cache.ForwardingListener;
 import org.apache.jackrabbit.oak.plugins.document.cache.NodeDocOffHeapCache;
 import org.apache.jackrabbit.oak.plugins.document.cache.OffHeapCache;
 import org.apache.jackrabbit.oak.plugins.document.util.StringValue;
+import org.apache.jackrabbit.oak.plugins.document.util.Utils;
+import org.apache.jackrabbit.oak.stats.Clock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -72,7 +74,6 @@ import com.mongodb.DBCursor;
 import com.mongodb.DBObject;
 import com.mongodb.MongoException;
 import com.mongodb.QueryBuilder;
-import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
 import com.mongodb.WriteResult;
 
@@ -87,6 +88,13 @@ public class MongoDocumentStore implements CachingDocumentStore {
 
     private static final DBObject BY_ID_ASC = new BasicDBObject(Document.ID, 1);
 
+    static enum DocumentReadPreference {
+        PRIMARY,
+        PREFER_PRIMARY,
+        PREFER_SECONDARY,
+        PREFER_SECONDARY_IF_OLD_ENOUGH
+    }
+
     public static final int IN_CLAUSE_BATCH_SIZE = 500;
 
     private final DBCollection nodes;
@@ -112,6 +120,10 @@ public class MongoDocumentStore implements CachingDocumentStore {
      */
     private final Comparator<Revision> comparator = StableRevisionComparator.REVERSE;
 
+    private Clock clock = Clock.SIMPLE;
+
+    private final long maxReplicationLagMillis;
+
     private String lastReadWriteMode;
 
     public MongoDocumentStore(DB db, DocumentMK.Builder builder) {
@@ -122,6 +134,8 @@ public class MongoDocumentStore implements CachingDocumentStore {
         settings = db.getCollection(
                 Collection.SETTINGS.toString());
 
+        maxReplicationLagMillis = builder.getMaxReplicationLagMillis();
+
         // indexes:
         // the _id field is the primary key, so we don't need to define it
         DBObject index = new BasicDBObject();
@@ -176,9 +190,7 @@ public class MongoDocumentStore implements CachingDocumentStore {
                 .recordStats()
                 .build();
 
-        Cache<CacheValue, NodeDocument> cache =
-                new NodeDocOffHeapCache(primaryCache, listener, builder, this);
-        return cache;
+        return new NodeDocOffHeapCache(primaryCache, listener, builder, this);
     }
 
     private static long start() {
@@ -231,25 +243,32 @@ public class MongoDocumentStore implements CachingDocumentStore {
 
     @Override
     public <T extends Document> T find(Collection<T> collection, String key) {
-        return find(collection, key, Integer.MAX_VALUE);
+        return find(collection, key, true, -1);
     }
 
-    @SuppressWarnings("unchecked")
     @Override
     public <T extends Document> T find(final Collection<T> collection,
                                        final String key,
                                        int maxCacheAge) {
+        return find(collection, key, false, maxCacheAge);
+    }
+
+    @SuppressWarnings("unchecked")
+    private <T extends Document> T find(final Collection<T> collection,
+                                       final String key,
+                                       boolean preferCached,
+                                       final int maxCacheAge) {
         if (collection != Collection.NODES) {
-            return findUncached(collection, key);
+            return findUncached(collection, key, DocumentReadPreference.PRIMARY);
         }
         CacheValue cacheKey = new StringValue(key);
         NodeDocument doc;
-        if (maxCacheAge > 0) {
+        if (maxCacheAge > 0 || preferCached) {
             // first try without lock
             doc = nodesCache.getIfPresent(cacheKey);
             if (doc != null) {
-                if (maxCacheAge == Integer.MAX_VALUE ||
-                        System.currentTimeMillis() - doc.getCreated() < maxCacheAge) {
+                if (preferCached ||
+                        getTime() - doc.getCreated() < maxCacheAge) {
                     if (doc == NodeDocument.NULL) {
                         return null;
                     }
@@ -267,17 +286,17 @@ public class MongoDocumentStore implements CachingDocumentStore {
                     doc = nodesCache.get(cacheKey, new Callable<NodeDocument>() {
                         @Override
                         public NodeDocument call() throws Exception {
-                            NodeDocument doc = (NodeDocument) findUncached(collection, key);
+                            NodeDocument doc = (NodeDocument) findUncached(collection, key, getReadPreference(maxCacheAge));
                             if (doc == null) {
                                 doc = NodeDocument.NULL;
                             }
                             return doc;
                         }
                     });
-                    if (maxCacheAge == 0 || maxCacheAge == Integer.MAX_VALUE) {
+                    if (maxCacheAge == 0 || preferCached) {
                         break;
                     }
-                    if (System.currentTimeMillis() - doc.getCreated() < maxCacheAge) {
+                    if (getTime() - doc.getCreated() < maxCacheAge) {
                         break;
                     }
                     // too old: invalidate, try again
@@ -297,12 +316,25 @@ public class MongoDocumentStore implements CachingDocumentStore {
     }
 
     @CheckForNull
-    <T extends Document> T findUncached(Collection<T> collection, String key) {
+    private <T extends Document> T findUncached(Collection<T> collection, String key, DocumentReadPreference docReadPref) {
         DBCollection dbCollection = getDBCollection(collection);
         long start = start();
         try {
-            DBObject obj = dbCollection.findOne(getByKeyQuery(key).get());
-            if (obj == null) {
+            ReadPreference readPreference = getMongoReadPreference(collection, Utils.getParentId(key), docReadPref);
+            DBObject obj = dbCollection.findOne(getByKeyQuery(key).get(), null, null, readPreference);
+
+            if (obj == null
+                    && readPreference.isSlaveOk()) {
+                //In case secondary read preference is used and node is not found
+                //then check with primary again as it might happen that node document has not been
+                //replicated. This is required for case like SplitDocument where the SplitDoc is fetched with
+                //maxCacheAge == Integer.MAX_VALUE which results in readPreference of secondary.
+                //In such a case we know that document with such an id must exist
+                //but possibly due to replication lag it has not reached the secondary. So in that case read again
+                //from primary
+                obj = dbCollection.findOne(getByKeyQuery(key).get(), null, null, ReadPreference.primary());
+            }
+            if(obj == null){
                 return null;
             }
             T doc = convertFromDBObject(collection, obj);
@@ -345,6 +377,9 @@ public class MongoDocumentStore implements CachingDocumentStore {
         long start = start();
         try {
             DBCursor cursor = dbCollection.find(query).sort(BY_ID_ASC);
+            String parentId = Utils.getParentIdFromLowerLimit(fromKey);
+            cursor.setReadPreference(getMongoReadPreference(collection, parentId, getDefaultReadPreference(collection)));
+
             List<T> list;
             try {
                 list = new ArrayList<T>();
@@ -624,6 +659,66 @@ public class MongoDocumentStore implements CachingDocumentStore {
         }
     }
 
+    DocumentReadPreference getReadPreference(int maxCacheAge){
+        if(maxCacheAge >= 0 && maxCacheAge < maxReplicationLagMillis) {
+            return DocumentReadPreference.PRIMARY;
+        } else if(maxCacheAge == Integer.MAX_VALUE){
+            return DocumentReadPreference.PREFER_SECONDARY;
+        } else {
+           return DocumentReadPreference.PREFER_SECONDARY_IF_OLD_ENOUGH;
+        }
+    }
+
+    DocumentReadPreference getDefaultReadPreference(Collection col){
+        return col == Collection.NODES ? DocumentReadPreference.PREFER_SECONDARY_IF_OLD_ENOUGH : DocumentReadPreference.PRIMARY;
+    }
+
+    <T extends Document> ReadPreference getMongoReadPreference(Collection<T> collection,
+                                                               String parentId,
+                                                               DocumentReadPreference preference) {
+        switch(preference){
+            case PRIMARY:
+                return ReadPreference.primary();
+            case PREFER_PRIMARY :
+                return ReadPreference.primaryPreferred();
+            case PREFER_SECONDARY :
+                return getConfiguredReadPreference(collection);
+            case PREFER_SECONDARY_IF_OLD_ENOUGH:
+                if(collection != Collection.NODES){
+                    return ReadPreference.primary();
+                }
+
+                //Default to primary preferred such that in case primary is being elected
+                //we can still read from secondary
+                //TODO REVIEW Would that be safe
+                ReadPreference readPreference = ReadPreference.primaryPreferred();
+                if (parentId != null) {
+                    long replicationSafeLimit = getTime() - maxReplicationLagMillis;
+                    NodeDocument cachedDoc = (NodeDocument) getIfCached(collection, parentId);
+                    if (cachedDoc != null && !cachedDoc.hasBeenModifiedSince(replicationSafeLimit)) {
+
+                        //If the parent has not been modified for a long time then its children
+                        //would also not have been modified. In that case we can read from a secondary
+                        readPreference = getConfiguredReadPreference(collection);
+                    }
+                }
+                return readPreference;
+            default:
+                throw new IllegalArgumentException("Unsupported usage " + preference);
+        }
+    }
+
+    /**
+     * Retrieves the ReadPreference specified for the Mongo DB in use irrespective of
+     * DBCollection. Depending on deployments the user can tweak the default references
+     * to read from secondary and in that also tag secondaries
+     *
+     * @return db level ReadPreference
+     */
+    ReadPreference getConfiguredReadPreference(Collection collection){
+        return getDBCollection(collection).getReadPreference();
+    }
+
     @CheckForNull
     <T extends Document> T convertFromDBObject(@Nonnull Collection<T> collection,
                                                @Nullable DBObject n) {
@@ -919,25 +1014,33 @@ public class MongoDocumentStore implements CachingDocumentStore {
         }
         lastReadWriteMode = readWriteMode;
         try {
-            Map<String, String> map = Splitter.on(", ").withKeyValueSeparator(":").split(readWriteMode);
-            String read = map.get("read");
-            if (read != null) {
-                ReadPreference readPref = ReadPreference.valueOf(read);
-                if (!readPref.equals(nodes.getReadPreference())) {
-                    nodes.setReadPreference(readPref);
-                    LOG.info("Using ReadPreference " + readPref);
-                }
+            String rwModeUri = readWriteMode;
+            if(!readWriteMode.startsWith("mongodb://")){
+                rwModeUri = String.format("mongodb://localhost/?%s", readWriteMode);
             }
-            String write = map.get("write");
-            if (write != null) {
-                WriteConcern writeConcern = WriteConcern.valueOf(write);
-                if (!writeConcern.equals(nodes.getWriteConcern())) {
-                    nodes.setWriteConcern(writeConcern);
-                    LOG.info("Using WriteConcern " + writeConcern);
-                }
+            MongoClientURI uri = new MongoClientURI(rwModeUri);
+            ReadPreference readPref = uri.getOptions().getReadPreference();
+
+            if (!readPref.equals(nodes.getReadPreference())) {
+                nodes.setReadPreference(readPref);
+                LOG.info("Using ReadPreference {} ",readPref);
+            }
+
+            WriteConcern writeConcern = uri.getOptions().getWriteConcern();
+            if (!writeConcern.equals(nodes.getWriteConcern())) {
+                nodes.setWriteConcern(writeConcern);
+                LOG.info("Using WriteConcern " + writeConcern);
             }
         } catch (Exception e) {
             LOG.error("Error setting readWriteMode " + readWriteMode, e);
         }
     }
+
+    private long getTime() {
+        return clock.getTime();
+    }
+
+    void setClock(Clock clock) {
+        this.clock = clock;
+    }
 }
\ No newline at end of file
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoMissingLastRevSeeker.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoMissingLastRevSeeker.java
index a04e9ff..770bb5c 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoMissingLastRevSeeker.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoMissingLastRevSeeker.java
@@ -67,8 +67,7 @@ public class MongoMissingLastRevSeeker extends MissingLastRevSeeker {
         DBCursor cursor =
                 getNodeCollection().find(query)
                         .sort(sortFields)
-                        .setReadPreference(
-                                ReadPreference.secondaryPreferred());
+                        .setReadPreference(ReadPreference.primary());
         return CloseableIterable.wrap(transform(cursor, new Function<DBObject, NodeDocument>() {
             @Override
             public NodeDocument apply(DBObject input) {
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoVersionGCSupport.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoVersionGCSupport.java
index 4bc2d83..1471505 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoVersionGCSupport.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoVersionGCSupport.java
@@ -117,7 +117,7 @@ public class MongoVersionGCSupport extends VersionGCSupport {
         final BasicDBObject keys = new BasicDBObject(Document.ID, 1);
         List<String> ids;
         DBCursor cursor = getNodeCollection().find(query, keys)
-                .setReadPreference(ReadPreference.secondaryPreferred());
+                .setReadPreference(store.getConfiguredReadPreference(Collection.NODES));
         try {
              ids = ImmutableList.copyOf(Iterables.transform(cursor, new Function<DBObject, String>() {
                  @Override
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java
index 149e05a..9cde5d9 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java
@@ -23,13 +23,12 @@ import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.sql.Timestamp;
 import java.util.Comparator;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
+import javax.annotation.CheckForNull;
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 
@@ -61,14 +60,14 @@ public class Utils {
      * possibly be too large to be used for the primary key for the document
      * store.
      */
-    private static final int PATH_SHORT = Integer.getInteger("oak.pathShort", 165);
+    static final int PATH_SHORT = Integer.getInteger("oak.pathShort", 165);
 
     /**
      * The maximum length of the parent path, in bytes. If the parent path is
      * longer, then the id of a document is no longer the path, but the hash of
      * the parent, and then the node name.
      */
-    private static final int PATH_LONG = Integer.getInteger("oak.pathLong", 350);
+    static final int PATH_LONG = Integer.getInteger("oak.pathLong", 350);
 
     /**
      * The maximum size a node name, in bytes. This is only a problem for long path.
@@ -250,6 +249,31 @@ public class Utils {
         return depth + ":" + path;
     }
 
+    /**
+     * Returns the parent id for the given id, if possible.
+     *
+     * <p>It returns null in the following cases:
+     * <ul>
+     *     <li>If id is from long path</li>
+     *     <li>If id is for root path</li>
+     * </ul>
+     *</p>
+     * @param id id for which parent id needs to be determined
+     * @return parent id. null if parent id cannot be determined
+     */
+    @CheckForNull
+    public static String getParentId(String id){
+        if(Utils.isIdFromLongPath(id)){
+            return null;
+        }
+        String path = Utils.getPathFromId(id);
+        if(PathUtils.denotesRoot(path)){
+            return null;
+        }
+        String parentPath = PathUtils.getParentPath(path);
+        return Utils.getIdFromPath(parentPath);
+    }
+
     public static boolean isLongPath(String path) {
         // the most common case: a short path
         // avoid calculating the parent path
@@ -354,6 +378,23 @@ public class Utils {
     }
 
     /**
+     * Returns parentId extracted from the fromKey. fromKey is usually constructed
+     * using Utils#getKeyLowerLimit
+     *
+     * @param fromKey key used as start key in queries
+     * @return parentId if possible.
+     */
+    @CheckForNull
+    public static String getParentIdFromLowerLimit(String fromKey){
+        //If key just ends with slash 2:/foo/ then append a fake
+        //name to create a proper id
+        if(fromKey.endsWith("/")){
+            fromKey = fromKey + "a";
+        }
+        return getParentId(fromKey);
+    }
+
+    /**
      * Returns <code>true</code> if a revision tagged with the given revision
      * should be considered committed, <code>false</code> otherwise. Committed
      * revisions have a tag, which equals 'c' or starts with 'c-'.
diff --git oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/mongo/ReadPreferenceIT.java oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/mongo/ReadPreferenceIT.java
new file mode 100644
index 0000000..9b938c3
--- /dev/null
+++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/mongo/ReadPreferenceIT.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.plugins.document.mongo;
+
+import java.util.concurrent.TimeUnit;
+
+import com.mongodb.ReadPreference;
+import org.apache.jackrabbit.oak.plugins.document.DocumentMK;
+import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
+import org.apache.jackrabbit.oak.plugins.document.MongoUtils;
+import org.apache.jackrabbit.oak.plugins.document.Revision;
+import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection;
+import org.apache.jackrabbit.oak.plugins.document.util.Utils;
+import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
+import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import org.apache.jackrabbit.oak.stats.Clock;
+import org.junit.After;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.apache.jackrabbit.oak.plugins.document.Collection.NODES;
+import static org.apache.jackrabbit.oak.plugins.document.Collection.SETTINGS;
+import static org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore.DocumentReadPreference;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Integration test for MongoDocumentStore read preference handling:
+ * conversion of cache-age hints to {@code DocumentReadPreference},
+ * mapping of those to Mongo {@code ReadPreference} values, and the
+ * {@code readWriteMode} runtime override. Skipped when no MongoDB
+ * instance is available.
+ */
+public class ReadPreferenceIT {
+
+    private DocumentNodeStore documentNodeStore;
+    private NodeStore nodeStore;
+
+    //Store under test, unwrapped from the DocumentNodeStore
+    private MongoDocumentStore mongoDS;
+
+    //Virtual clock injected into the store to simulate passage of time
+    private Clock clock;
+
+    //Configured maximum replication lag in millis
+    private long replicationLag;
+
+    @BeforeClass
+    public static void checkMongoDbAvailable() {
+        //Skip the whole class when MongoDB is not reachable
+        Assume.assumeNotNull(MongoUtils.getConnection());
+    }
+
+    @Before
+    public void prepareStores() throws Exception {
+        clock = new Clock.Virtual();
+        //replicationLag is already in millis, hence MILLISECONDS below
+        replicationLag = TimeUnit.SECONDS.toMillis(10);
+        MongoConnection mc = MongoUtils.getConnection();
+        documentNodeStore = new DocumentMK.Builder()
+                .setMaxReplicationLag(replicationLag, TimeUnit.MILLISECONDS)
+                .setMongoDB(mc.getDB())
+                .setClusterId(1)
+                .getNodeStore();
+        mongoDS = (MongoDocumentStore) documentNodeStore.getDocumentStore();
+        nodeStore = documentNodeStore;
+    }
+
+    @After
+    public void clearDB() {
+        //NOTE(review): documentNodeStore is not disposed here - confirm
+        //whether a dispose() call should precede dropping the collections
+        MongoUtils.dropCollections(mongoDS.getDBCollection(NODES).getDB());
+    }
+
+    //Verifies mapping from a caller-supplied cache max-age to the
+    //internal DocumentReadPreference hint
+    @Test
+    public void testPreferenceConversion() throws Exception{
+        //For cacheAge < replicationLag result should be primary
+        assertEquals(DocumentReadPreference.PRIMARY, mongoDS.getReadPreference(0));
+        assertEquals(DocumentReadPreference.PRIMARY,
+                mongoDS.getReadPreference((int) (replicationLag - 100)));
+
+        //For Integer.MAX_VALUE it should be secondary as caller intends that value is stable
+        assertEquals(DocumentReadPreference.PREFER_SECONDARY,
+                mongoDS.getReadPreference(Integer.MAX_VALUE));
+
+        //For all other cases depends on age
+        assertEquals(DocumentReadPreference.PREFER_SECONDARY_IF_OLD_ENOUGH,
+                mongoDS.getReadPreference(-1));
+        assertEquals(DocumentReadPreference.PREFER_SECONDARY_IF_OLD_ENOUGH,
+                mongoDS.getReadPreference((int) (replicationLag + 100)));
+    }
+
+    //Verifies mapping from DocumentReadPreference to the Mongo
+    //ReadPreference, with the collection default unchanged and changed
+    @Test
+    public void testMongoReadPreferencesDefault() throws Exception{
+        assertEquals(ReadPreference.primary(),
+                mongoDS.getMongoReadPreference(NODES,"foo", DocumentReadPreference.PRIMARY));
+
+        assertEquals(ReadPreference.primaryPreferred(),
+                mongoDS.getMongoReadPreference(NODES,"foo", DocumentReadPreference.PREFER_PRIMARY));
+
+        //By default Mongo read preference is primary
+        assertEquals(ReadPreference.primary(),
+                mongoDS.getMongoReadPreference(NODES,"foo", DocumentReadPreference.PREFER_SECONDARY));
+
+        //Change the default and assert again
+        mongoDS.getDBCollection(NODES).getDB().setReadPreference(ReadPreference.secondary());
+        assertEquals(ReadPreference.secondary(),
+                mongoDS.getMongoReadPreference(NODES,"foo", DocumentReadPreference.PREFER_SECONDARY));
+
+        //for case where parent age cannot be determined the preference should be primaryPreferred
+        assertEquals(ReadPreference.primaryPreferred(),
+                mongoDS.getMongoReadPreference(NODES,"foo", DocumentReadPreference.PREFER_SECONDARY_IF_OLD_ENOUGH));
+
+        //For collection other than NODES always primary
+        assertEquals(ReadPreference.primary(),
+                mongoDS.getMongoReadPreference(SETTINGS,"foo", DocumentReadPreference.PREFER_SECONDARY_IF_OLD_ENOUGH));
+
+    }
+
+    //Verifies that the age of the parent document (relative to the
+    //configured replication lag) decides primary vs secondary reads
+    @Test
+    public void testMongoReadPreferencesWithAge() throws Exception{
+        //Change the default
+        ReadPreference testPref = ReadPreference.secondary();
+        mongoDS.getDBCollection(NODES).getDB().setReadPreference(testPref);
+
+        NodeBuilder b1 = nodeStore.getRoot().builder();
+        b1.child("x").child("y");
+        nodeStore.merge(b1, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+
+        String id = Utils.getIdFromPath("/x/y");
+        String parentId = Utils.getParentId(id);
+        //Drop the cached entry so the age check consults the parent doc
+        mongoDS.invalidateCache(NODES,id);
+
+        //For modifiedTime < replicationLag primary should be preferred
+        assertEquals(ReadPreference.primaryPreferred(),
+                mongoDS.getMongoReadPreference(NODES,parentId, DocumentReadPreference.PREFER_SECONDARY_IF_OLD_ENOUGH));
+
+        //Going into future to make parent /x old enough
+        clock.waitUntil(Revision.getCurrentTimestamp() + replicationLag);
+        mongoDS.setClock(clock);
+
+        //For old modified nodes secondaries should be preferred
+        assertEquals(testPref,
+                mongoDS.getMongoReadPreference(NODES, parentId, DocumentReadPreference.PREFER_SECONDARY_IF_OLD_ENOUGH));
+    }
+
+    //Verifies that a readWriteMode string reconfigures read preference
+    //and write concern of the underlying collection at runtime
+    @Test
+    public void testReadWriteMode() throws Exception{
+        assertEquals(ReadPreference.primary(), mongoDS.getConfiguredReadPreference(NODES));
+
+        mongoDS.setReadWriteMode("readPreference=secondary&w=2&safe=true&j=true");
+
+        assertEquals(ReadPreference.secondary(), mongoDS.getDBCollection(NODES).getReadPreference());
+        assertEquals(2, mongoDS.getDBCollection(NODES).getWriteConcern().getW());
+        assertTrue(mongoDS.getDBCollection(NODES).getWriteConcern().getJ());
+
+        assertEquals(ReadPreference.secondary(), mongoDS.getConfiguredReadPreference(NODES));
+    }
+}
diff --git oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/util/UtilsTest.java oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/util/UtilsTest.java
index 155c58d..d6d8151 100644
--- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/util/UtilsTest.java
+++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/util/UtilsTest.java
@@ -16,11 +16,15 @@
  */
 package org.apache.jackrabbit.oak.plugins.document.util;
 
+import com.google.common.base.Strings;
+import org.apache.jackrabbit.oak.commons.PathUtils;
 import org.apache.jackrabbit.oak.plugins.document.Revision;
 import org.junit.Ignore;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Tests for {@link Utils}.
@@ -38,6 +42,23 @@ public class UtilsTest {
                 Utils.getPreviousIdFor("/a/b/c/d/e/f/g/h/i/j/k/l/m", r, 3));
     }
 
+    @Test
+    public void getParentIdFromLowerLimit() throws Exception{
+        assertEquals("1:/foo",Utils.getParentIdFromLowerLimit(Utils.getKeyLowerLimit("/foo")));
+        assertEquals("1:/foo",Utils.getParentIdFromLowerLimit("2:/foo/bar"));
+    }
+
+    @Test
+    public void getParentId() throws Exception{
+        //A path longer than PATH_LONG is stored as a long-path id, from
+        //which the parent cannot be derived
+        String longName = Strings.repeat("p", Utils.PATH_LONG + 1);
+        String longPath = PathUtils.concat("/" + longName, "foo");
+        assertTrue(Utils.isLongPath(longPath));
+        assertNull(Utils.getParentId(Utils.getIdFromPath(longPath)));
+
+        //The root node has no parent
+        assertNull(Utils.getParentId(Utils.getIdFromPath("/")));
+
+        //Regular case: parent of a depth-2 id is the depth-1 id
+        assertEquals("1:/foo", Utils.getParentId("2:/foo/bar"));
+    }
+
     @Ignore("Performance test")
     @Test
     public void performance_getPreviousIdFor() {
