diff --git a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/OrderedIndexConcurrentClusterIT.java b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/OrderedIndexConcurrentClusterIT.java
new file mode 100644
index 0000000..5a75101
--- /dev/null
+++ b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/OrderedIndexConcurrentClusterIT.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.jcr;
+
+import static org.apache.jackrabbit.oak.jcr.AbstractRepositoryTest.dispose;
+
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.jcr.Credentials;
+import javax.jcr.Node;
+import javax.jcr.PropertyType;
+import javax.jcr.Repository;
+import javax.jcr.RepositoryException;
+import javax.jcr.Session;
+import javax.jcr.SimpleCredentials;
+
+import org.apache.jackrabbit.oak.plugins.document.DocumentMK;
+import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection;
+import org.apache.jackrabbit.oak.plugins.index.IndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.property.OrderedIndex;
+import org.junit.After;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Concurrent-write integration test for the ordered property index: boots
+ * {@code NUM_CLUSTER_NODES} DocumentMK instances on top of the same MongoDB
+ * database and runs one worker thread per instance, each adding nodes that
+ * carry an indexed {@code lastModified} date property. Any exception raised
+ * by a worker fails the test.
+ */
+public class OrderedIndexConcurrentClusterIT {
+    private static final Logger LOG = LoggerFactory.getLogger(OrderedIndexConcurrentClusterIT.class);
+
+    // number of DocumentMK cluster nodes and therefore worker threads
+    private static final int NUM_CLUSTER_NODES = 5;
+    // outer iterations (one session.save() each) performed by every worker
+    private static final int LOOP = 2800;
+    // child nodes created per outer iteration
+    private static final int COUNT = 5;
+    private static final Credentials ADMIN = new SimpleCredentials("admin", "admin".toCharArray());
+    // name of the index definition node under the index definitions container
+    private static final String INDEX_NODE_NAME = "lastModified";
+    // property covered by the ordered index
+    private static final String INDEX_PROPERTY = "lastModified";
+
+    private List<Repository> repos = new ArrayList<Repository>();
+    private List<DocumentMK> mks = new ArrayList<DocumentMK>();
+    private List<Thread> workers = new ArrayList<Thread>();
+
+    // ----- SHARED WITH ConcurrentAddNodesClusterIT (later refactoring) -----
+    // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
+
+    @BeforeClass
+    public static void mongoDBAvailable() {
+        // skip (rather than fail) the whole IT when no MongoDB is reachable
+        Assume.assumeTrue(OakMongoMKRepositoryStub.isMongoDBAvailable());
+    }
+
+    private static MongoConnection createConnection() throws Exception {
+        return OakMongoMKRepositoryStub.createConnection(
+            OrderedIndexConcurrentClusterIT.class.getSimpleName());
+    }
+
+    @Before
+    public void before() throws Exception {
+        dropDB();
+        initRepository();
+    }
+
+    private static void dropDB() throws Exception {
+        MongoConnection con = createConnection();
+        try {
+            con.getDB().dropDatabase();
+        } finally {
+            con.close();
+        }
+    }
+
+    /**
+     * Bootstraps the repository (cluster id 1) and creates the ordered index
+     * definition before the actual cluster nodes are started. The MK (and with
+     * it the Mongo connection) is disposed even if the bootstrap fails.
+     */
+    private static void initRepository() throws Exception {
+        MongoConnection con = createConnection();
+        DocumentMK mk = new DocumentMK.Builder()
+                .setMongoDB(con.getDB())
+                .setClusterId(1).open();
+        try {
+            Repository repository = new Jcr(mk.getNodeStore()).createRepository();
+            Session session = repository.login(ADMIN);
+            try {
+                ensureIndex(session);
+            } finally {
+                session.logout();
+            }
+            dispose(repository);
+        } finally {
+            mk.dispose(); // closes the Mongo connection as well
+        }
+    }
+
+    @After
+    public void after() throws Exception {
+        for (Repository repo : repos) {
+            dispose(repo);
+        }
+        for (DocumentMK mk : mks) {
+            mk.dispose();
+        }
+        dropDB();
+    }
+
+    /**
+     * Adds nodes through the given repository and records any failure in the
+     * shared {@code exceptions} map, keyed by the worker's thread name.
+     */
+    private final class Worker implements Runnable {
+
+        private final Repository repo;
+        private final Map<String, Exception> exceptions;
+
+        Worker(Repository repo, Map<String, Exception> exceptions) {
+            this.repo = repo;
+            this.exceptions = exceptions;
+        }
+
+        @Override
+        public void run() {
+            try {
+                Session session = repo.login(ADMIN);
+                try {
+                    ensureIndex(session);
+                    String nodeName = "testroot-" + Thread.currentThread().getName();
+                    createNodes(session, nodeName, LOOP, COUNT, exceptions);
+                } finally {
+                    session.logout();
+                }
+            } catch (Exception e) {
+                exceptions.put(Thread.currentThread().getName(), e);
+            }
+        }
+    }
+
+    // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    // ----- SHARED WITH ConcurrentAddNodesClusterIT (later refactoring) -----
+
+    /**
+     * Creates {@code loopCount * nodeCount} nodes under a fresh test root,
+     * stamping each leaf with the indexed {@code lastModified} date. One save
+     * per outer iteration; aborts early as soon as any worker has reported an
+     * exception. (Variant of the ConcurrentAddNodesClusterIT version that
+     * uses a Date as the indexed property.)
+     */
+    private void createNodes(Session session,
+                             String nodeName,
+                             int loopCount,
+                             int nodeCount,
+                             Map<String, Exception> exceptions)
+            throws RepositoryException {
+        Node root = session.getRootNode().addNode(nodeName, "nt:unstructured");
+        Calendar calendar = Calendar.getInstance();
+        // 24-hour pattern ("HH"); "hh" would fold afternoon timestamps into 1-12
+        SimpleDateFormat sdf = new SimpleDateFormat("dd/MM/yyyy HH:mm:ss.S");
+
+        for (int i = 0; i < loopCount; i++) {
+            Node node = root.addNode("testnode" + i, "nt:unstructured");
+            for (int j = 0; j < nodeCount; j++) {
+                Node child = node.addNode("node" + j, "nt:unstructured");
+                calendar = Calendar.getInstance();
+                child.setProperty(INDEX_PROPERTY, calendar);
+            }
+            if (!exceptions.isEmpty()) {
+                // another worker already failed; no point in continuing
+                break;
+            }
+            if (LOG.isDebugEnabled() && i % 10 == 0) {
+                LOG.debug("{} looped {}. Last calendar: {}",
+                    new Object[] { nodeName, i, sdf.format(calendar.getTime()) });
+            }
+            session.save();
+        }
+    }
+
+    @Test
+    public void addNodesConcurrently() throws Exception {
+        LOG.debug(
+            "Adding a total of {} nodes evenly spread across cluster. Loop: {}, Count: {}, Cluster nodes: {}",
+            new Object[] { LOOP * COUNT * NUM_CLUSTER_NODES, LOOP, COUNT, NUM_CLUSTER_NODES });
+
+        // creating instances
+        for (int i = 1; i <= NUM_CLUSTER_NODES; i++) {
+            DocumentMK mk = new DocumentMK.Builder()
+                    .setMongoDB(createConnection().getDB())
+                    .setClusterId(i).open();
+            mks.add(mk);
+        }
+
+        Map<String, Exception> exceptions = Collections.synchronizedMap(
+            new HashMap<String, Exception>());
+
+        // initialising repositories and creating workers
+        for (int i = 0; i < mks.size(); i++) {
+            DocumentMK mk = mks.get(i);
+            Repository repo = new Jcr(mk.getNodeStore()).createRepository();
+            Session session = repo.login(ADMIN);
+            try {
+                ensureIndex(session);
+            } finally {
+                session.logout();
+            }
+            repos.add(repo);
+            workers.add(new Thread(new Worker(repo, exceptions), "Worker-" + (i + 1)));
+        }
+
+        for (Thread t : workers) {
+            t.start();
+        }
+        for (Thread t : workers) {
+            t.join();
+        }
+
+        // log every failure, then fail the test with the first one recorded
+        Exception firstFailure = null;
+        for (Map.Entry<String, Exception> entry : exceptions.entrySet()) {
+            LOG.error("Exception in thread {}", entry.getKey(), entry.getValue());
+            if (firstFailure == null) {
+                firstFailure = entry.getValue();
+            }
+        }
+        if (firstFailure != null) {
+            throw firstFailure;
+        }
+    }
+
+    /**
+     * Creates the ordered index definition through the provided session if it
+     * does not exist yet. A failure to save is interpreted as "another cluster
+     * node created the index concurrently" and is resolved by discarding the
+     * local pending changes.
+     *
+     * @param session the admin session to operate on
+     */
+    private static void ensureIndex(Session session) throws RepositoryException {
+        Node root = session.getRootNode();
+        Node indexDef = root.getNode(IndexConstants.INDEX_DEFINITIONS_NAME);
+        Node index;
+
+        if (!indexDef.hasNode(INDEX_NODE_NAME)) {
+            index = indexDef.addNode(INDEX_NODE_NAME, IndexConstants.INDEX_DEFINITIONS_NODE_TYPE);
+
+            index.setProperty(IndexConstants.TYPE_PROPERTY_NAME, OrderedIndex.TYPE);
+            index.setProperty(IndexConstants.REINDEX_PROPERTY_NAME, true);
+            index.setProperty(IndexConstants.PROPERTY_NAMES, new String[] { INDEX_PROPERTY },
+                PropertyType.NAME);
+            try {
+                root.getSession().save();
+            } catch (RepositoryException e) {
+                // most likely created concurrently by another cluster node:
+                // drop our pending changes and carry on with the existing index
+                root.getSession().refresh(false);
+            }
+        }
+    }
+}
