Index: src/test/java/org/apache/jackrabbit/core/cluster/DbClusterTestJCR3162.java
===================================================================
--- src/test/java/org/apache/jackrabbit/core/cluster/DbClusterTestJCR3162.java	(revision 0)
+++ src/test/java/org/apache/jackrabbit/core/cluster/DbClusterTestJCR3162.java	(revision 0)
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.cluster;
+
+import java.io.File;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.util.UUID;
+
+import javax.jcr.Node;
+import javax.jcr.RepositoryException;
+import javax.jcr.Session;
+import javax.jcr.SimpleCredentials;
+import javax.jcr.query.Query;
+import javax.jcr.query.RowIterator;
+
+import junit.framework.Assert;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.commons.JcrUtils;
+import org.apache.jackrabbit.core.RepositoryImpl;
+import org.apache.jackrabbit.core.config.RepositoryConfig;
+import org.h2.tools.Server;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test for JCR-3162: a clustered repository must re-index (rather than replay duplicates) when its local journal revision is reset to 0.
+ */
+public class DbClusterTestJCR3162 {
+
+    // NOTE: the two keys MUST differ — with a shared key the second
+    // setProperty below silently overwrites the first, enabling the
+    // cluster-delete path that this test intends to switch OFF.
+    public static final String JCR3162_ENABLE_CLUSTER_DELETE = "JCR3162.enableClusterDelete";
+    public static final String JCR3162_ENABLE_REINDEX = "JCR3162.enableReindex";
+
+    static {
+        System.setProperty(JCR3162_ENABLE_CLUSTER_DELETE, Boolean.FALSE.toString());
+        System.setProperty(JCR3162_ENABLE_REINDEX, Boolean.TRUE.toString());
+    }
+
+    private static final SimpleCredentials ADMIN = new SimpleCredentials(
+            "admin", "admin".toCharArray());
+
+    private Server server1;
+    private Server server2;
+
+    private RepositoryImpl rep1;
+    private RepositoryImpl rep2;
+
+    private String clusterId1 = UUID.randomUUID().toString();
+    private String clusterId2 = UUID.randomUUID().toString();
+
+    @Before
+    public void setUp() throws Exception {
+        deleteAll();
+        server1 = Server.createTcpServer("-tcpPort", "9001", "-baseDir",
+                "./target/dbClusterTest/db1", "-tcpAllowOthers").start();
+        server2 = Server.createTcpServer("-tcpPort", "9002", "-baseDir",
+                "./target/dbClusterTest/db2", "-tcpAllowOthers").start();
+        FileUtils
+                .copyFile(
+                        new File(
+                                "./src/test/resources/org/apache/jackrabbit/core/cluster/repository-h2.xml"),
+                        new File("./target/dbClusterTest/node1/repository.xml"));
+        FileUtils
+                .copyFile(
+                        new File(
+                                "./src/test/resources/org/apache/jackrabbit/core/cluster/repository-h2.xml"),
+                        new File("./target/dbClusterTest/node2/repository.xml"));
+
+        System.setProperty(ClusterNode.SYSTEM_PROPERTY_NODE_ID, clusterId1);
+        rep1 = RepositoryImpl.create(RepositoryConfig.create(new File(
+                "./target/dbClusterTest/node1")));
+
+        System.setProperty(ClusterNode.SYSTEM_PROPERTY_NODE_ID, clusterId2);
+        rep2 = RepositoryImpl.create(RepositoryConfig.create(new File(
+                "./target/dbClusterTest/node2")));
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        try {
+            rep1.shutdown();
+            rep2.shutdown();
+        } finally {
+            server1.stop();
+            server2.stop();
+            deleteAll();
+        }
+    }
+
+    private void deleteAll() throws IOException {
+        FileUtils.deleteDirectory(new File("./target/dbClusterTest"));
+    }
+
+    @Test
+    public void test() throws RepositoryException {
+        Session s1 = rep1.login(ADMIN);
+        Session s2 = rep2.login(ADMIN);
+
+        int count = 5;
+
+        // 1. create
+        Node n = s1.getRootNode().addNode(
+                "test-cluster-" + System.currentTimeMillis(),
+                JcrConstants.NT_UNSTRUCTURED);
+        for (int i = 0; i < count; i++) {
+            n.addNode("child_" + i);
+        }
+        s1.save();
+
+        // 2. sync & verify
+        checkConsistency(s2, n.getPath(), count);
+
+        // 3. rollback journal on box #2 to 0
+        resetJournalRev();
+
+        // 4. sync & verify again
+        // rep2 was restarted, need to login again
+        s2 = rep2.login(ADMIN);
+        checkConsistency(s2, n.getPath(), count);
+    }
+
+    private void resetJournalRev() {
+        Connection con = null;
+        try {
+            rep2.shutdown();
+            con = DriverManager.getConnection(
+                    "jdbc:h2:tcp://localhost:9001,localhost:9002/db", "sa",
+                    "sa");
+            PreparedStatement prep = con
+                    .prepareStatement("update JOURNAL_LOCAL_REVISIONS set REVISION_ID=0 where JOURNAL_ID=?");
+            prep.setString(1, clusterId2);
+            prep.executeUpdate();
+            prep.close();
+        } catch (Exception e) {
+            e.printStackTrace();
+            Assert.fail("Unable to reset revision to 0. " + e.getMessage());
+        } finally {
+            if (con != null) {
+                try {
+                    con.close();
+                } catch (Exception e) {
+                    // e.printStackTrace();
+                }
+            }
+
+            try {
+                rep2 = RepositoryImpl.create(RepositoryConfig.create(new File(
+                        "./target/dbClusterTest/node2")));
+            } catch (Exception e) {
+                e.printStackTrace();
+                Assert.fail("Unable to restart repo2. " + e.getMessage());
+            }
+        }
+    }
+
+    private void checkConsistency(Session s, String path, int nodes)
+            throws RepositoryException {
+
+        s.refresh(true);
+        Node n = s.getNode(path);
+        Assert.assertNotNull(n);
+
+        int found = 0;
+        for (Node c : JcrUtils.getChildNodes(n)) {
+            found++;
+        }
+        Assert.assertEquals(nodes, found);
+
+        RowIterator result = s
+                .getWorkspace()
+                .getQueryManager()
+                .createQuery(
+                        "SELECT * FROM [" + JcrConstants.NT_UNSTRUCTURED
+                                + "] as NODE WHERE ischildnode(NODE, [" + path
+                                + "])", Query.JCR_SQL2).execute().getRows();
+
+        int foundViaQuery = 0;
+        while (result.hasNext()) {
+            result.next();
+            foundViaQuery++;
+        }
+        Assert.assertEquals(nodes, foundViaQuery);
+    }
+}
Index: src/main/java/org/apache/jackrabbit/core/query/QueryHandlerContext.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/QueryHandlerContext.java	(revision 1207039)
+++ src/main/java/org/apache/jackrabbit/core/query/QueryHandlerContext.java	(working copy)
@@ -201,4 +201,10 @@
         return repositoryContext.getExecutor();
     }
 
+    public long getInstanceRevision() {
+        if (repositoryContext.getClusterNode() == null) {
+            return -1;
+        }
+        return repositoryContext.getClusterNode().getRevision();
+    }
 }
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java	(revision 1209064)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java	(working copy)
@@ -35,7 +35,9 @@
 
 import javax.jcr.RepositoryException;
 
+import org.apache.jackrabbit.core.cluster.DbClusterTestJCR3162;
 import org.apache.jackrabbit.core.id.NodeId;
+import org.apache.jackrabbit.core.journal.AbstractJournal;
 import org.apache.jackrabbit.core.query.lucene.directory.DirectoryManager;
 import org.apache.jackrabbit.core.state.ChildNodeEntry;
 import org.apache.jackrabbit.core.state.ItemStateException;
@@ -241,7 +243,7 @@
      *                    neither be indexed nor further traversed
      * @throws IOException if an error occurs
      */
-    MultiIndex(SearchIndex handler, Set<NodeId> excludedIDs) throws IOException {
+    MultiIndex(SearchIndex handler, Set<NodeId> excludedIDs, final Path rootPath) throws IOException {
         this.directoryManager = handler.getDirectoryManager();
         this.redoLogFactory = handler.getRedoLogFactory();
         this.indexDir = directoryManager.getDirectory(".");
@@ -252,6 +254,10 @@
 
         indexNames = new IndexInfos(indexDir, "indexes");
 
+        if (Boolean.getBoolean(DbClusterTestJCR3162.JCR3162_ENABLE_REINDEX)) {
+            deleteAllIndexesIfStale();
+        }
+
         this.indexHistory = new IndexHistory(indexDir,
                 handler.getMaxHistoryAge() * 1000);
 
@@ -266,14 +272,13 @@
         merger.setMergeFactor(handler.getMergeFactor());
         merger.setMinMergeDocs(handler.getMinMergeDocs());
 
-        IndexingQueueStore store = new IndexingQueueStore(indexDir);
-
         // initialize indexing queue
-        this.indexingQueue = new IndexingQueue(store);
+        this.indexingQueue = new IndexingQueue(new IndexingQueueStore(indexDir));
 
         // open persistent indexes
-        for (Iterator<?> it = indexNames.iterator(); it.hasNext(); ) {
-            IndexInfo info = (IndexInfo) it.next();
+        Iterator<IndexInfo> iterator = indexNames.iterator();
+        while (iterator.hasNext()) {
+            IndexInfo info = iterator.next();
             String name = info.getName();
             // only open if it still exists
             // it is possible that indexNames still contains a name for
@@ -331,6 +336,12 @@
             flush();
         }
 
+        // create initial index if needed
+        if (numDocs() == 0) {
+            createInitialIndex(handler.getContext().getItemStateManager(),
+                    handler.getContext().getRootId(), rootPath);
+        }
+
         if (indexNames.size() > 0) {
             scheduleFlushTask();
         }
@@ -913,6 +924,39 @@
     }
 
     /**
+     * Removes all the existing sub-indexes if the index is determined to be
+     * stale.
+     * 
+     * An index is considered stale when the Journal's current revision is 0
+     * (new journal / _really_ old journal) and we already have active
+     * sub-indexes. In this situation replaying the EventLog (during the
+     * ClusterNode's sync operation) will result in duplicate documents in the
+     * lucene index.
+     * 
+     * <b>This should happen prior to the initialization of the PersistentIndex
+     * collection ({@link #indexes}) as the method only uses the persisted index
+     * names</b>.
+     * 
+     * @see {@link AbstractJournal#sync()}
+     */
+    private synchronized void deleteAllIndexesIfStale() {
+        long instanceRev = handler.getContext().getInstanceRevision();
+        // -1 means "not clustered" — never stale; only a revision of exactly 0
+        // (fresh/reset journal with pre-existing sub-indexes) marks staleness.
+        if (instanceRev != 0) {
+            return;
+        }
+        log.debug("index is stale, will remove sub-indexes and re-index the workspace ");
+        Iterator<IndexInfo> iterator = indexNames.iterator();
+        while (iterator.hasNext()) {
+            String name = iterator.next().getName();
+            iterator.remove();
+            synchronized (deletable) {
+                deletable.put(name, System.currentTimeMillis());
+            }
+            log.debug("...moved index {} to deletable", name);
+        }
+    }
+
+    /**
      * Flushes this <code>MultiIndex</code>. Persists all pending changes and
      * resets the redo log.
      *
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java	(revision 1209064)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java	(working copy)
@@ -535,18 +535,15 @@
         // initialize the Tika parser
         parser = createParser();
 
-        index = new MultiIndex(this, excludedIDs);
-        if (index.numDocs() == 0) {
-            Path rootPath;
-            if (excludedIDs.isEmpty()) {
-                // this is the index for jcr:system
-                rootPath = JCR_SYSTEM_PATH;
-            } else {
-                rootPath = ROOT_PATH;
-            }
-            index.createInitialIndex(context.getItemStateManager(),
-                    context.getRootId(), rootPath);
+        Path rootPath;
+        if (excludedIDs.isEmpty()) {
+            // this is the index for jcr:system
+            rootPath = JCR_SYSTEM_PATH;
+        } else {
+            rootPath = ROOT_PATH;
         }
+        index = new MultiIndex(this, excludedIDs, rootPath);
+        
         if (consistencyCheckEnabled
                 && (index.getRedoLogApplied() || forceConsistencyCheck)) {
             log.info("Running consistency check...");
@@ -633,7 +630,6 @@
             if (state != null) {
                 NodeId id = state.getNodeId();
                 addedIds.add(id);
-                removedIds.remove(id);
                 retrieveAggregateRoot(state, aggregateRoots);
 
                 try {
Index: src/main/java/org/apache/jackrabbit/core/SearchManager.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/SearchManager.java	(revision 1207039)
+++ src/main/java/org/apache/jackrabbit/core/SearchManager.java	(working copy)
@@ -33,6 +33,7 @@
 import javax.jcr.query.Query;
 import javax.jcr.query.qom.QueryObjectModel;
 
+import org.apache.jackrabbit.core.cluster.DbClusterTestJCR3162;
 import org.apache.jackrabbit.core.config.SearchConfig;
 import org.apache.jackrabbit.core.fs.FileSystem;
 import org.apache.jackrabbit.core.id.NodeId;
@@ -332,9 +333,12 @@
                 long type = e.getType();
                 if (type == Event.NODE_ADDED) {
                     addedNodes.put(e.getChildId(), e);
-                    // quick'n dirty fix for JCR-905
-                    if (e.isExternal()) {
-                        removedNodes.add(e.getChildId());
+                    if (Boolean
+                            .getBoolean(DbClusterTestJCR3162.JCR3162_ENABLE_CLUSTER_DELETE)) {
+                        // quick'n dirty fix for JCR-905
+                        if (e.isExternal()) {
+                            removedNodes.add(e.getChildId());
+                        }
                     }
                     if (e.isShareableChildNode()) {
                         // simply re-index shareable nodes
