diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
index 4d5ab20..8643bf7 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
@@ -520,6 +520,7 @@ public class DocumentMK {
         private StatisticsProvider statisticsProvider = StatisticsProvider.NOOP;
         private BlobStoreStats blobStoreStats;
         private CacheStats blobStoreCacheStats;
+        private DocumentStoreStatsCollector documentStoreStatsCollector;
 
         public Builder() {
         }
@@ -874,6 +875,18 @@ public class DocumentMK {
             return this;
         }
 
+        public DocumentStoreStatsCollector getDocumentStoreStatsCollector() {
+            if (documentStoreStatsCollector == null) {
+                documentStoreStatsCollector = new DocumentStoreStats(statisticsProvider);
+            }
+            return documentStoreStatsCollector;
+        }
+
+        public Builder setDocumentStoreStatsCollector(DocumentStoreStatsCollector documentStoreStatsCollector) {
+            this.documentStoreStatsCollector = documentStoreStatsCollector;
+            return this;
+        }
+
         @CheckForNull
         public BlobStoreStats getBlobStoreStats() {
             return blobStoreStats;
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
index e33f617..d3a4c11 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java
@@ -641,6 +641,16 @@ public class DocumentNodeStoreService {
             );
         }
 
+        if (mkBuilder.getDocumentStoreStatsCollector() instanceof DocumentStoreStatsMBean) {
+            registrations.add(
+                    registerMBean(whiteboard,
+                            DocumentStoreStatsMBean.class,
+                            (DocumentStoreStatsMBean) mkBuilder.getDocumentStoreStatsCollector(),
+                            DocumentStoreStatsMBean.TYPE,
+                            "DocumentStore Statistics")
+            );
+        }
+
         final long versionGcMaxAgeInSecs = toLong(prop(PROP_VER_GC_MAX_AGE), DEFAULT_VER_GC_MAX_AGE);
         final long blobGcMaxAgeInSecs = toLong(prop(PROP_BLOB_GC_MAX_AGE), DEFAULT_BLOB_GC_MAX_AGE);
 
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStats.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStats.java
new file mode 100644
index 0000000..49d49cc
--- /dev/null
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStats.java
@@ -0,0 +1,272 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.document;
+
+import java.util.concurrent.TimeUnit;
+
+import javax.management.openmbean.CompositeData;
+
+import org.apache.jackrabbit.api.stats.TimeSeries;
+import org.apache.jackrabbit.oak.plugins.document.util.Utils;
+import org.apache.jackrabbit.oak.stats.MeterStats;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
+import org.apache.jackrabbit.oak.stats.StatsOptions;
+import org.apache.jackrabbit.oak.stats.TimerStats;
+import org.apache.jackrabbit.stats.TimeSeriesStatsUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Document Store statistics helper class.
+ */
+public class DocumentStoreStats implements DocumentStoreStatsCollector, DocumentStoreStatsMBean {
+    private final Logger perfLog = LoggerFactory.getLogger(DocumentStoreStats.class.getName() + ".perf");
+
+    public static final int PERF_LOG_THRESHOLD = 1;
+    private static final String FIND_NODES_CACHED = "DOCUMENT_NODES_FIND_CACHED";
+    private static final String FIND_SPLIT_NODES = "DOCUMENT_NODES_FIND_SPLIT";
+    private static final String FIND_NODES_SLAVE = "DOCUMENT_NODES_FIND_SLAVE";
+    private static final String FIND_NODES_PRIMARY = "DOCUMENT_NODES_FIND_PRIMARY";
+    private static final String QUERY_NODES_SLAVE = "DOCUMENT_NODES_QUERY_SLAVE";
+    private static final String QUERY_NODES_PRIMARY = "DOCUMENT_NODES_QUERY_PRIMARY";
+    private static final String QUERY_JOURNAL = "DOCUMENT_JOURNAL_QUERY";
+    private static final String CREATE_JOURNAL = "DOCUMENT_JOURNAL_CREATE";
+    private static final String CREATE_NODES = "DOCUMENT_NODES_CREATE";
+    private static final String UPDATE_NODES = "DOCUMENT_NODES_UPDATE";
+
+    private final MeterStats findNodesCachedMeter;
+    private final TimerStats findNodesMissingTimer;
+    private final MeterStats findNodesSlave;
+    private final TimerStats findNodesTimer;
+    private final MeterStats findNodesPrimary;
+    private final MeterStats queryNodesSlave;
+    private final MeterStats queryNodesPrimary;
+    private final MeterStats queryNodesResult;
+    private final TimerStats queryNodesWithFilterTimer;
+    private final TimerStats queryNodesTimer;
+    private final MeterStats queryJournal;
+    private final TimerStats queryJournalTimer;
+    private final TimerStats createNodeTimer;
+    private final TimerStats updateNodeTimer;
+    private final MeterStats createNodeMeter;
+    private final MeterStats updateNodeMeter;
+    private final MeterStats createJournal;
+    private final TimerStats createJournalTimer;
+    private final MeterStats findSplitNodes;
+    private final StatisticsProvider statisticsProvider;
+
+    public DocumentStoreStats(StatisticsProvider provider) {
+        statisticsProvider = checkNotNull(provider);
+        findNodesCachedMeter = provider.getMeter(FIND_NODES_CACHED, StatsOptions.DEFAULT);
+        findNodesMissingTimer = provider.getTimer("DOCUMENT_NODES_FIND_MISSING", StatsOptions.METRICS_ONLY);
+        findNodesTimer = provider.getTimer("DOCUMENT_NODES_FIND", StatsOptions.METRICS_ONLY);
+        findSplitNodes = provider.getMeter(FIND_SPLIT_NODES, StatsOptions.DEFAULT);
+
+        findNodesSlave = provider.getMeter(FIND_NODES_SLAVE, StatsOptions.DEFAULT);
+        findNodesPrimary = provider.getMeter(FIND_NODES_PRIMARY, StatsOptions.DEFAULT);
+
+        queryNodesSlave = provider.getMeter(QUERY_NODES_SLAVE, StatsOptions.DEFAULT);
+        queryNodesPrimary = provider.getMeter(QUERY_NODES_PRIMARY, StatsOptions.DEFAULT);
+        queryNodesResult = provider.getMeter("DOCUMENT_NODES_QUERY_FIND", StatsOptions.DEFAULT);
+
+        queryNodesWithFilterTimer = provider.getTimer("DOCUMENT_NODES_QUERY_FILTER", StatsOptions.METRICS_ONLY);
+        queryNodesTimer = provider.getTimer("DOCUMENT_NODES_QUERY", StatsOptions.METRICS_ONLY);
+
+        queryJournal = provider.getMeter(QUERY_JOURNAL, StatsOptions.DEFAULT);
+        queryJournalTimer = provider.getTimer("DOCUMENT_JOURNAL_QUERY_TIMER", StatsOptions.METRICS_ONLY);
+        createJournal = provider.getMeter(CREATE_JOURNAL, StatsOptions.DEFAULT);
+        createJournalTimer = provider.getTimer("DOCUMENT_JOURNAL_CREATE_TIMER", StatsOptions.METRICS_ONLY);
+
+        createNodeTimer = provider.getTimer("DOCUMENT_NODES_CREATE_TIMER", StatsOptions.METRICS_ONLY);
+        updateNodeTimer = provider.getTimer("DOCUMENT_NODES_UPDATE_TIMER", StatsOptions.METRICS_ONLY);
+        createNodeMeter = provider.getMeter(CREATE_NODES, StatsOptions.DEFAULT);
+        updateNodeMeter = provider.getMeter(UPDATE_NODES, StatsOptions.DEFAULT);
+    }
+
+    //~------------------------------------------< DocumentStoreStatsCollector >
+
+    @Override
+    public void doneFindCached(Collection collection, String key) {
+        //findCached calls are almost always made for the NODES collection only
+        if (collection == Collection.NODES){
+            findNodesCachedMeter.mark();
+        }
+    }
+
+    @Override
+    public void doneFindUncached(long timeTakenNanos, Collection collection, String key,
+                                 boolean docFound, boolean isSlaveOk) {
+        if (collection == Collection.NODES){
+            //For now collect time for reads from primary/secondary in same timer
+            TimerStats timer = docFound ? findNodesTimer : findNodesMissingTimer;
+            timer.update(timeTakenNanos, TimeUnit.NANOSECONDS);
+
+            //For now only nodes can be looked up from slave
+            if (isSlaveOk){
+                findNodesSlave.mark();
+            } else {
+                findNodesPrimary.mark();
+            }
+
+            if (Utils.isPreviousDocId(key)){
+                findSplitNodes.mark();
+            }
+        }
+
+        perfLog(timeTakenNanos, "findUncached on key={}, isSlaveOk={}", key, isSlaveOk);
+    }
+
+    @Override
+    public void doneQuery(long timeTakenNanos, Collection collection, String fromKey, String toKey,
+                          String indexedProperty, int resultSize, long lockTime, boolean isSlaveOk) {
+        if (collection == Collection.NODES){
+            //Distinguish between query done with filter and without filter
+            TimerStats timer = indexedProperty != null ? queryNodesWithFilterTimer : queryNodesTimer;
+            timer.update(timeTakenNanos, TimeUnit.NANOSECONDS);
+
+            //Number of nodes read
+            queryNodesResult.mark(resultSize);
+
+            //Stats for queries to slaves
+            if (isSlaveOk){
+                queryNodesSlave.mark();
+            } else {
+                queryNodesPrimary.mark();
+            }
+
+            //TODO What more to gather
+            // - Histogram of result - How the number of children vary
+        } else if (collection == Collection.JOURNAL){
+            //Journals are read from primary and without any extra condition on indexedProperty
+            queryJournal.mark(resultSize);
+            queryJournalTimer.update(timeTakenNanos, TimeUnit.NANOSECONDS);
+        }
+        perfLog(timeTakenNanos, "query for children from [{}] to [{}], lock:{}", fromKey, toKey, lockTime);
+    }
+
+    @Override
+    public void doneCreate(long timeTakenNanos, Collection collection, int size, boolean insertSuccess) {
+        if (collection == Collection.NODES){
+            //Create would be only called for Split documents
+        } else if (collection == Collection.JOURNAL){
+            createJournal.mark(size);
+            createJournalTimer.update(timeTakenNanos, TimeUnit.NANOSECONDS);
+        }
+        perfLog(timeTakenNanos, "create");
+    }
+
+    @Override
+    public void doneUpdate(long timeTakenNanos, Collection collection, int updateCount) {
+        //NODES - Update is called for lastRev update
+        perfLog(timeTakenNanos, "update");
+    }
+
+    @Override
+    public void doneFindAndModify(long timeTakenNanos, Collection collection, String key, boolean newEntry) {
+        if (collection == Collection.NODES){
+            if (newEntry){
+                createNodeMeter.mark();
+                createNodeTimer.update(timeTakenNanos, TimeUnit.NANOSECONDS);
+            } else {
+                updateNodeMeter.mark();
+                updateNodeTimer.update(timeTakenNanos, TimeUnit.NANOSECONDS);
+            }
+        }
+        perfLog(timeTakenNanos, "findAndModify [{}]", key);
+    }
+
+    private void perfLog(long timeTakenNanos, String logMessagePrefix, Object... arguments){
+        if (!perfLog.isDebugEnabled()){
+            return;
+        }
+
+        final long diff = TimeUnit.NANOSECONDS.toMillis(timeTakenNanos);
+        if (perfLog.isTraceEnabled()) {
+            // if log level is TRACE, then always log - and do that on TRACE
+            // then:
+            perfLog.trace(logMessagePrefix + " [took " + diff + "ms]",
+                    (Object[]) arguments);
+        } else if (diff > PERF_LOG_THRESHOLD) {
+            perfLog.debug(logMessagePrefix + " [took " + diff + "ms]",
+                    (Object[]) arguments);
+        }
+    }
+
+    //~--------------------------------------------< DocumentStoreStatsMBean >
+
+    @Override
+    public CompositeData getFindCachedNodesHistory() {
+        return getTimeSeriesData(FIND_NODES_CACHED, FIND_NODES_CACHED);
+    }
+
+    @Override
+    public CompositeData getFindSplitNodesHistory() {
+        return getTimeSeriesData(FIND_SPLIT_NODES, FIND_SPLIT_NODES);
+    }
+
+    @Override
+    public CompositeData getFindNodesFromPrimaryHistory() {
+        return getTimeSeriesData(FIND_NODES_PRIMARY, FIND_NODES_PRIMARY);
+    }
+
+    @Override
+    public CompositeData getFindNodesFromSlaveHistory() {
+        return getTimeSeriesData(FIND_NODES_SLAVE, FIND_NODES_SLAVE);
+    }
+
+    @Override
+    public CompositeData getQueryNodesFromSlaveHistory() {
+        return getTimeSeriesData(QUERY_NODES_SLAVE, QUERY_NODES_SLAVE);
+    }
+
+    @Override
+    public CompositeData getQueryNodesFromPrimaryHistory() {
+        return getTimeSeriesData(QUERY_NODES_PRIMARY, QUERY_NODES_PRIMARY);
+    }
+
+    @Override
+    public CompositeData getQueryJournalHistory() {
+        return getTimeSeriesData(QUERY_JOURNAL, QUERY_JOURNAL);
+    }
+
+    @Override
+    public CompositeData getCreateJournalHistory() {
+        return getTimeSeriesData(CREATE_JOURNAL, CREATE_JOURNAL);
+    }
+
+    @Override
+    public CompositeData getCreateNodesHistory() {
+        return getTimeSeriesData(CREATE_NODES, CREATE_NODES);
+    }
+
+    @Override
+    public CompositeData getUpdateNodesHistory() {
+        return getTimeSeriesData(UPDATE_NODES, UPDATE_NODES);
+    }
+
+    private CompositeData getTimeSeriesData(String name, String desc){
+        return TimeSeriesStatsUtil.asCompositeData(getTimeSeries(name), desc);
+    }
+
+    private TimeSeries getTimeSeries(String name) {
+        return statisticsProvider.getStats().getTimeSeries(name, true);
+    }
+}
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsCollector.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsCollector.java
new file mode 100644
index 0000000..fe833be
--- /dev/null
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsCollector.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.plugins.document;
+
+import javax.annotation.Nullable;
+
+public interface DocumentStoreStatsCollector {
+    /**
+     * Called when a document with the given key is found in the cache
+     *
+     * @param collection the collection
+     * @param key key of the document found in the cache
+     */
+    void doneFindCached(Collection collection, String key);
+
+    /**
+     * Called when a document with the given key is looked up from the remote store
+     * @param timeTakenNanos time taken by the call, in nanoseconds
+     * @param collection the collection
+     * @param key collection key
+     * @param docFound true if document is found
+     * @param isSlaveOk true if find was performed against a secondary instance
+     */
+    void doneFindUncached(long timeTakenNanos, Collection collection, String key, boolean docFound, boolean isSlaveOk);
+
+    /**
+     * Called when query with given parameters is performed
+     * @param timeTakenNanos time taken by the query, in nanoseconds
+     * @param collection the collection
+     * @param fromKey the start value (excluding)
+     * @param toKey the end value (excluding)
+     * @param indexedProperty the name of the indexed property (optional)
+     * @param resultSize number of documents found for given query
+     * @param lockTime time in millis to acquire any lock. If no lock was required then its -1
+     * @param isSlaveOk true if find was performed against a secondary instance
+     */
+    void doneQuery(long timeTakenNanos, Collection collection, String fromKey, String toKey,
+                   @Nullable String indexedProperty, int resultSize, long lockTime, boolean isSlaveOk);
+
+    /**
+     * Called when documents are created in the given collection
+     * @param timeTakenNanos time taken by the insert, in nanoseconds
+     * @param collection the collection
+     * @param size number of documents created
+     * @param insertSuccess true if the insert was successful
+     */
+    void doneCreate(long timeTakenNanos, Collection collection, int size, boolean insertSuccess);
+
+    /**
+     * Called when a given update has modified multiple documents
+     * @param timeTakenNanos time taken by the update, in nanoseconds
+     * @param collection the collection
+     * @param updateCount number of updates performed
+     */
+    void doneUpdate(long timeTakenNanos, Collection collection, int updateCount);
+
+    /**
+     * Called when a update operation was completed which affected single
+     * document.
+     * @param timeTakenNanos time taken by the operation, in nanoseconds
+     * @param collection the collection
+     * @param key key of the document which got updated or inserted
+     * @param newEntry true if the document was newly created due to given operation
+     */
+    void doneFindAndModify(long timeTakenNanos, Collection collection, String key, boolean newEntry);
+}
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsMBean.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsMBean.java
new file mode 100644
index 0000000..465176c
--- /dev/null
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreStatsMBean.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.plugins.document;
+
+import javax.management.openmbean.CompositeData;
+
+import aQute.bnd.annotation.ProviderType;
+
+@ProviderType
+public interface DocumentStoreStatsMBean {
+
+    String TYPE = "DocumentStoreStats";
+
+    CompositeData getFindCachedNodesHistory();
+    CompositeData getFindSplitNodesHistory();
+    CompositeData getFindNodesFromPrimaryHistory();
+    CompositeData getFindNodesFromSlaveHistory();
+    CompositeData getQueryNodesFromSlaveHistory();
+    CompositeData getQueryNodesFromPrimaryHistory();
+    CompositeData getQueryJournalHistory();
+    CompositeData getCreateJournalHistory();
+    CompositeData getCreateNodesHistory();
+    CompositeData getUpdateNodesHistory();
+}
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
index 4dd41fe..38f612d 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
@@ -37,6 +37,7 @@ import javax.annotation.CheckForNull;
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 
+import com.google.common.base.Stopwatch;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.UncheckedExecutionException;
@@ -52,6 +53,7 @@ import org.apache.jackrabbit.oak.plugins.document.Document;
 import org.apache.jackrabbit.oak.plugins.document.DocumentMK;
 import org.apache.jackrabbit.oak.plugins.document.DocumentStore;
 import org.apache.jackrabbit.oak.plugins.document.DocumentStoreException;
+import org.apache.jackrabbit.oak.plugins.document.DocumentStoreStatsCollector;
 import org.apache.jackrabbit.oak.plugins.document.JournalEntry;
 import org.apache.jackrabbit.oak.plugins.document.NodeDocument;
 import org.apache.jackrabbit.oak.plugins.document.Revision;
@@ -164,6 +166,8 @@ public class MongoDocumentStore implements DocumentStore {
 
     private final Map<String, String> metadata;
 
+    private final DocumentStoreStatsCollector stats;
+
     public MongoDocumentStore(DB db, DocumentMK.Builder builder) {
         String version = checkVersion(db);
         metadata = ImmutableMap.<String,String>builder()
@@ -172,6 +176,7 @@ public class MongoDocumentStore implements DocumentStore {
                 .build();
 
         this.db = db;
+        stats = builder.getDocumentStoreStatsCollector();
         nodes = db.getCollection(Collection.NODES.toString());
         clusterNodes = db.getCollection(Collection.CLUSTER_NODES.toString());
         settings = db.getCollection(Collection.SETTINGS.toString());
@@ -341,6 +346,7 @@ public class MongoDocumentStore implements DocumentStore {
             if (doc != null) {
                 if (preferCached ||
                         getTime() - doc.getCreated() < maxCacheAge) {
+                    stats.doneFindCached(collection, key);
                     if (doc == NodeDocument.NULL) {
                         return null;
                     }
@@ -359,6 +365,7 @@ public class MongoDocumentStore implements DocumentStore {
                     if (doc != null) {
                         if (preferCached ||
                                 getTime() - doc.getCreated() < maxCacheAge) {
+                            stats.doneFindCached(collection, key);
                             if (doc == NodeDocument.NULL) {
                                 return null;
                             }
@@ -438,8 +445,9 @@ public class MongoDocumentStore implements DocumentStore {
     protected <T extends Document> T findUncached(Collection<T> collection, String key, DocumentReadPreference docReadPref) {
         log("findUncached", key, docReadPref);
         DBCollection dbCollection = getDBCollection(collection);
-        final long start = PERFLOG.start();
+        final Stopwatch watch = startWatch();
         boolean isSlaveOk = false;
+        boolean docFound = true;
         try {
             ReadPreference readPreference = getMongoReadPreference(collection, Utils.getParentId(key), docReadPref);
 
@@ -462,6 +470,7 @@ public class MongoDocumentStore implements DocumentStore {
                 obj = dbCollection.findOne(getByKeyQuery(key).get(), null, null, ReadPreference.primary());
             }
             if(obj == null){
+                docFound = false;
                 return null;
             }
             T doc = convertFromDBObject(collection, obj);
@@ -470,8 +479,7 @@ public class MongoDocumentStore implements DocumentStore {
             }
             return doc;
         } finally {
-            PERFLOG.end(start, 1, "findUncached on key={}, isSlaveOk={}", key,
-                    isSlaveOk);
+            stats.doneFindUncached(watch.elapsed(TimeUnit.NANOSECONDS), collection, key, docFound, isSlaveOk);
         }
     }
 
@@ -552,12 +560,12 @@ public class MongoDocumentStore implements DocumentStore {
         DBObject query = queryBuilder.get();
         String parentId = Utils.getParentIdFromLowerLimit(fromKey);
         long lockTime = -1;
-        final long start = PERFLOG.start();
+        final Stopwatch watch  = startWatch();
         Lock lock = withLock ? nodeLocks.acquireExclusive(parentId != null ? parentId : "") : null;
+        boolean isSlaveOk = false;
+        int resultSize = 0;
         try {
-            if (start != -1) {
-                lockTime = System.currentTimeMillis() - start;
-            }
+            lockTime = withLock ? watch.elapsed(TimeUnit.MILLISECONDS) : -1;
             DBCursor cursor = dbCollection.find(query).sort(BY_ID_ASC);
             if (!disableIndexHint) {
                 cursor.hint(hint);
@@ -570,6 +578,7 @@ public class MongoDocumentStore implements DocumentStore {
                     getMongoReadPreference(collection, parentId, getDefaultReadPreference(collection));
 
             if(readPreference.isSlaveOk()){
+                isSlaveOk = true;
                 LOG.trace("Routing call to secondary for fetching children from [{}] to [{}]", fromKey, toKey);
             }
 
@@ -588,6 +597,7 @@ public class MongoDocumentStore implements DocumentStore {
                     }
                     list.add(doc);
                 }
+                resultSize = list.size();
             } finally {
                 cursor.close();
             }
@@ -596,7 +606,8 @@ public class MongoDocumentStore implements DocumentStore {
             if (lock != null) {
                 lock.unlock();
             }
-            PERFLOG.end(start, 1, "query for children from [{}] to [{}], lock:{}", fromKey, toKey, lockTime);
+            stats.doneQuery(watch.elapsed(TimeUnit.NANOSECONDS), collection, fromKey, toKey,
+                    indexedProperty, resultSize, lockTime, isSlaveOk);
         }
     }
 
@@ -701,7 +712,8 @@ public class MongoDocumentStore implements DocumentStore {
         if (collection == Collection.NODES) {
             lock = nodeLocks.acquire(updateOp.getId());
         }
-        final long start = PERFLOG.start();
+        final Stopwatch watch = startWatch();
+        boolean newEntry = false;
         try {
             // get modCount of cached document
             Long modCount = null;
@@ -737,6 +749,11 @@ public class MongoDocumentStore implements DocumentStore {
             // perform operation and get complete document
             QueryBuilder query = createQueryForUpdate(updateOp.getId(), updateOp.getConditions());
             DBObject oldNode = dbCollection.findAndModify(query.get(), null, null /*sort*/, false /*remove*/, update, false /*returnNew*/, upsert);
+
+            if (oldNode == null){
+                newEntry = true;
+            }
+
             if (checkConditions && oldNode == null) {
                 return null;
             }
@@ -764,7 +781,7 @@ public class MongoDocumentStore implements DocumentStore {
             if (lock != null) {
                 lock.unlock();
             }
-            PERFLOG.end(start, 1, "findAndModify [{}]", updateOp.getId());
+            stats.doneFindAndModify(watch.elapsed(TimeUnit.NANOSECONDS), collection, updateOp.getId(), newEntry);
         }
     }
 
@@ -853,7 +870,8 @@ public class MongoDocumentStore implements DocumentStore {
         }
 
         DBCollection dbCollection = getDBCollection(collection);
-        final long start = PERFLOG.start();
+        final Stopwatch watch = startWatch();
+        boolean insertSuccess = false;
         try {
             try {
                 dbCollection.insert(inserts);
@@ -862,12 +880,13 @@ public class MongoDocumentStore implements DocumentStore {
                         nodesCache.putIfAbsent((NodeDocument) doc);
                     }
                 }
+                insertSuccess = true;
                 return true;
             } catch (MongoException e) {
                 return false;
             }
         } finally {
-            PERFLOG.end(start, 1, "create");
+            stats.doneCreate(watch.elapsed(TimeUnit.NANOSECONDS), collection, updateOps.size(), insertSuccess);
         }
     }
 
@@ -882,7 +901,7 @@ public class MongoDocumentStore implements DocumentStore {
         // make sure we don't modify the original updateOp
         updateOp = updateOp.copy();
         DBObject update = createUpdate(updateOp);
-        final long start = PERFLOG.start();
+        final Stopwatch watch = startWatch();
         try {
             Map<String, NodeDocument> cachedDocs = Collections.emptyMap();
             if (collection == Collection.NODES) {
@@ -929,7 +948,7 @@ public class MongoDocumentStore implements DocumentStore {
                 throw DocumentStoreException.convert(e);
             }
         } finally {
-            PERFLOG.end(start, 1, "update");
+            stats.doneUpdate(watch.elapsed(TimeUnit.NANOSECONDS), collection, keys.size());
         }
     }
 
@@ -1220,6 +1239,11 @@ public class MongoDocumentStore implements DocumentStore {
         return doc;
     }
 
+    private Stopwatch startWatch() {
+        return Stopwatch.createStarted();
+    }
+
+
     @Override
     public void setReadWriteMode(String readWriteMode) {
         if (readWriteMode == null || readWriteMode.equals(lastReadWriteMode)) {
diff --git oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java
index 6c5a894..269461e 100644
--- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java
+++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/Utils.java
@@ -345,6 +345,20 @@ public class Utils {
     }
 
     /**
+     * Determines if the passed id belongs to a previous doc
+     *
+     * @param id id to check
+     * @return true if the id belongs to a previous doc
+     */
+    public static boolean isPreviousDocId(String id){
+        int indexOfColon = id.indexOf(':');
+        if (indexOfColon > 0 && indexOfColon < id.length() - 1){
+            return id.charAt(indexOfColon + 1) == 'p';
+        }
+        return false;
+    }
+
+    /**
      * Deep copy of a map that may contain map values.
      *
      * @param source the source map
diff --git oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/util/UtilsTest.java oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/util/UtilsTest.java
index 8f2a7e6..251360e 100644
--- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/util/UtilsTest.java
+++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/util/UtilsTest.java
@@ -35,6 +35,7 @@ import org.junit.Ignore;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
@@ -56,6 +57,14 @@ public class UtilsTest {
     }
 
     @Test
+    public void previousDoc() throws Exception{
+        Revision r = new Revision(System.currentTimeMillis(), 0, 0);
+        assertTrue(Utils.isPreviousDocId(Utils.getPreviousIdFor("/", r, 0)));
+        assertTrue(Utils.isPreviousDocId(Utils.getPreviousIdFor("/a/b/c/d/e/f/g/h/i/j/k/l/m", r, 3)));
+        assertFalse(Utils.isPreviousDocId(Utils.getIdFromPath("/a/b")));
+    }
+
+    @Test
     public void getParentIdFromLowerLimit() throws Exception{
         assertEquals("1:/foo",Utils.getParentIdFromLowerLimit(Utils.getKeyLowerLimit("/foo")));
         assertEquals("1:/foo",Utils.getParentIdFromLowerLimit("2:/foo/bar"));
