Index: src/main/java/org/apache/jackrabbit/core/query/QueryHandlerContext.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/QueryHandlerContext.java	(revision 1207039)
+++ src/main/java/org/apache/jackrabbit/core/query/QueryHandlerContext.java	(working copy)
@@ -201,4 +201,10 @@
         return repositoryContext.getExecutor();
     }
 
+    public long getInstanceRevision() {
+        if (repositoryContext.getClusterNode() == null) {
+            return -1;
+        }
+        return repositoryContext.getClusterNode().getRevision();
+    }
 }
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java	(revision 1207039)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java	(working copy)
@@ -36,6 +36,7 @@
 import javax.jcr.RepositoryException;
 
 import org.apache.jackrabbit.core.id.NodeId;
+import org.apache.jackrabbit.core.journal.AbstractJournal;
 import org.apache.jackrabbit.core.query.lucene.directory.DirectoryManager;
 import org.apache.jackrabbit.core.state.ChildNodeEntry;
 import org.apache.jackrabbit.core.state.ItemStateException;
@@ -241,7 +242,7 @@
      *                    neither be indexed nor further traversed
      * @throws IOException if an error occurs
      */
-    MultiIndex(SearchIndex handler, Set<NodeId> excludedIDs) throws IOException {
+    MultiIndex(SearchIndex handler, Set<NodeId> excludedIDs, final Path rootPath) throws IOException {
         this.directoryManager = handler.getDirectoryManager();
         this.redoLogFactory = handler.getRedoLogFactory();
         this.indexDir = directoryManager.getDirectory(".");
@@ -251,6 +252,7 @@
         this.nsMappings = handler.getNamespaceMappings();
 
         indexNames = new IndexInfos(indexDir, "indexes");
+        deleteAllIndexesIfStale();
 
         this.indexHistory = new IndexHistory(indexDir,
                 handler.getMaxHistoryAge() * 1000);
@@ -266,14 +268,13 @@
         merger.setMergeFactor(handler.getMergeFactor());
         merger.setMinMergeDocs(handler.getMinMergeDocs());
 
-        IndexingQueueStore store = new IndexingQueueStore(indexDir);
-
         // initialize indexing queue
-        this.indexingQueue = new IndexingQueue(store);
+        this.indexingQueue = new IndexingQueue(new IndexingQueueStore(indexDir));
 
         // open persistent indexes
-        for (Iterator<?> it = indexNames.iterator(); it.hasNext(); ) {
-            IndexInfo info = (IndexInfo) it.next();
+        Iterator<IndexInfo> iterator = indexNames.iterator();
+        while (iterator.hasNext()) {
+            IndexInfo info = iterator.next();
             String name = info.getName();
             // only open if it still exists
             // it is possible that indexNames still contains a name for
@@ -331,6 +332,12 @@
             flush();
         }
 
+        // create initial index if needed
+        if (numDocs() == 0) {
+            createInitialIndex(handler.getContext().getItemStateManager(),
+                    handler.getContext().getRootId(), rootPath);
+        }
+
         if (indexNames.size() > 0) {
             scheduleFlushTask();
         }
@@ -913,6 +920,39 @@
     }
 
     /**
+     * Removes all the existing sub-indexes if the index is determined to be
+     * stale.
+     * 
+     * An index is considered stale when the Journal's current revision is 0
+     * (new journal / _really_ old journal) and we already have active
+     * sub-indexes. In this situation replaying the EventLog (during the
+     * ClusterNode's sync operation) will result in duplicate documents in the
+     * lucene index.
+     * 
+     * <b>This should happen prior to the initialization of the PersistentIndex
+     * collection ({@link #indexes}) as the method only uses the persisted index
+     * names</b>.
+     * 
+     * @see AbstractJournal#sync()
+     */
+    synchronized private void deleteAllIndexesIfStale() {
+        long instanceRev = handler.getContext().getInstanceRevision();
+        if (instanceRev > 0) {
+            return;
+        }
+        log.debug("index is stale, will remove sub-indexes and re-index the workspace ");
+        Iterator<IndexInfo> iterator = indexNames.iterator();
+        while (iterator.hasNext()) {
+            String name = iterator.next().getName();
+            iterator.remove();
+            synchronized (deletable) {
+                deletable.put(name, System.currentTimeMillis());
+            }
+            log.debug("...moved index {} to deletable", name);
+        }
+    }
+
+    /**
      * Flushes this <code>MultiIndex</code>. Persists all pending changes and
      * resets the redo log.
      *
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java	(revision 1207039)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java	(working copy)
@@ -535,18 +535,15 @@
         // initialize the Tika parser
         parser = createParser();
 
-        index = new MultiIndex(this, excludedIDs);
-        if (index.numDocs() == 0) {
-            Path rootPath;
-            if (excludedIDs.isEmpty()) {
-                // this is the index for jcr:system
-                rootPath = JCR_SYSTEM_PATH;
-            } else {
-                rootPath = ROOT_PATH;
-            }
-            index.createInitialIndex(context.getItemStateManager(),
-                    context.getRootId(), rootPath);
+        Path rootPath;
+        if (excludedIDs.isEmpty()) {
+            // this is the index for jcr:system
+            rootPath = JCR_SYSTEM_PATH;
+        } else {
+            rootPath = ROOT_PATH;
         }
+        index = new MultiIndex(this, excludedIDs, rootPath);
+        
         if (consistencyCheckEnabled
                 && (index.getRedoLogApplied() || forceConsistencyCheck)) {
             log.info("Running consistency check...");
@@ -633,7 +630,6 @@
             if (state != null) {
                 NodeId id = state.getNodeId();
                 addedIds.add(id);
-                removedIds.remove(id);
                 retrieveAggregateRoot(state, aggregateRoots);
 
                 try {
Index: src/main/java/org/apache/jackrabbit/core/SearchManager.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/SearchManager.java	(revision 1207039)
+++ src/main/java/org/apache/jackrabbit/core/SearchManager.java	(working copy)
@@ -332,10 +332,6 @@
                 long type = e.getType();
                 if (type == Event.NODE_ADDED) {
                     addedNodes.put(e.getChildId(), e);
-                    // quick'n dirty fix for JCR-905
-                    if (e.isExternal()) {
-                        removedNodes.add(e.getChildId());
-                    }
                     if (e.isShareableChildNode()) {
                         // simply re-index shareable nodes
                         removedNodes.add(e.getChildId());
