Index: src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentReader.java	(revision 883153)
+++ src/java/org/apache/lucene/index/SegmentReader.java	(working copy)
@@ -23,13 +23,13 @@
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
-
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.index.IndexWriter.Deletes;
 import org.apache.lucene.search.DefaultSimilarity;
 import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.Directory;
@@ -68,6 +68,8 @@
   private Ref singleNormRef;
 
   CoreReaders core;
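+  // Queue of delete terms/queries and update terms pushed to this
+  // reader by IndexWriter while synchronized on the writer; null until
+  // the first delete arrives.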
+  Deletes bufferedDeletes;
 
   // Holds core readers that are shared (unchanged) when
   // SegmentReader is cloned or reopened
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 883153)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -17,32 +17,37 @@
  * limitations under the License.
  */
 
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DocumentsWriter.IndexingChain;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.ThreadInterruptedException;
 
-import java.io.IOException;
-import java.io.Closeable;
-import java.io.PrintStream;
-import java.util.List;
-import java.util.Collection;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.Iterator;
-import java.util.Map;
-
 /**
   An <code>IndexWriter</code> creates and maintains an index.
 
@@ -1965,6 +1970,7 @@
   public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
+      deleteTermsLive(term);
       boolean doFlush = docWriter.bufferDeleteTerm(term);
       if (doFlush)
         flush(true, false, false);
@@ -1989,6 +1995,7 @@
   public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
+      deleteTermsLive(terms);
       boolean doFlush = docWriter.bufferDeleteTerms(terms);
       if (doFlush)
         flush(true, false, false);
@@ -1996,7 +2003,26 @@
       handleOOM(oom, "deleteDocuments(Term..)");
     }
   }
-
+  
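+  // Queue the delete term(s) on every live SegmentReader, synchronized
+  // on the writer so no reader can miss a delete, then resolve and
+  // apply whatever is queued.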
+  private void deleteTermsLive(Term... terms) throws IOException {
+    synchronized (this) {
+      for (SegmentInfo info : segmentInfos) {
+        SegmentReader reader = readerPool.get(info, false);
+        try {
+          synchronized (reader) {
+            if (reader.bufferedDeletes == null) {
+              reader.bufferedDeletes = new Deletes();
+            }
+            reader.bufferedDeletes.addTerms(terms);
+          }
+        } finally {
+          readerPool.release(reader);
+        }
+      }
+    }
+    applyReaderDeletes();
+  }
+  
   /**
    * Deletes the document(s) matching the provided query.
    *
@@ -2010,6 +2036,7 @@
    */
   public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
     ensureOpen();
+    deleteQueriesLive(query);
     boolean doFlush = docWriter.bufferDeleteQuery(query);
     if (doFlush)
       flush(true, false, false);
@@ -2030,12 +2057,266 @@
    */
   public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
     ensureOpen();
+    deleteQueriesLive(queries);
     boolean doFlush = docWriter.bufferDeleteQueries(queries);
     if (doFlush)
       flush(true, false, false);
   }
-
+  
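+  // Same pattern as deleteTermsLive, for delete-by-query.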
+  private void deleteQueriesLive(Query... queries) throws IOException {
+    synchronized (this) {
+      for (SegmentInfo info : segmentInfos) {
+        SegmentReader reader = readerPool.get(info, false);
+        try {
+          synchronized (reader) {
+            if (reader.bufferedDeletes == null) {
+              reader.bufferedDeletes = new Deletes();
+            }
+            reader.bufferedDeletes.addQueries(queries);
+          }
+        } finally {
+          readerPool.release(reader);
+        }
+      }
+    }
+    applyReaderDeletes();
+  }
+  
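+  // Checks out one pooled SegmentReader per segment; callers must
+  // release each reader back to readerPool.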
+  synchronized SegmentReader[] getReaders(boolean openDocStore) throws IOException {
+    SegmentReader[] readers = new SegmentReader[segmentInfos.size()];
+    for (int x=0; x < segmentInfos.size(); x++) {
+      readers[x] = readerPool.get(segmentInfos.info(x), openDocStore);
+    }
+    return readers;
+  }
+  
   /**
+   * Deletes are added to readers while synchronized on the writer, to
+   * ensure every current reader receives the delete (or update).
+   * The resolution of a delete to its doc ids, however, happens
+   * unsynchronized.
+   * 
+   * commitMerge is synchronized, so no new deletes arrive while it
+   * runs: all unresolved deletes are resolved there, and the queued
+   * deletes are merged into the queue of the merged reader.
+   * 
+   * For example, a call to deleteDocuments adds the deleted term to
+   * the delete queue of every SegmentReader.  All pending deletes in
+   * the readers' queues are then applied, regardless of whether they
+   * are the term/query of this particular call.  We apply deletes as
+   * eagerly as possible; it is fine if some are not applied
+   * immediately, because eventually they will be.  The most important
+   * thing is to never lose a delete, which could happen if we did not
+   * synchronize on the writer when adding the delete term/query to
+   * the SR's delete queue.
+   */
+  private void applyReaderDeletes() throws IOException {
+    SegmentReader[] readers = getReaders(false);
+    for (SegmentReader reader : readers) {
+      try {
+        applyReaderDeletes(reader);
+      } finally {
+        readerPool.release(reader);
+      }
+    }
+  }
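+  // e.g. two threads calling deleteDocuments concurrently both queue
+  // their term on every reader while holding the writer lock; each
+  // then drains whatever it finds, unsynchronized, and any entry
+  // already polled by the other thread has been applied there, so
+  // nothing is lost.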
+  
+  /**
+   * Iteratively resolves and applies this reader's queued delete
+   * terms, delete queries, and update terms, without synchronizing on
+   * the writer.
+   */
+  private void applyReaderDeletes(SegmentReader reader) throws IOException {
+    if (reader.bufferedDeletes == null) {
+      reader.bufferedDeletes = new Deletes();
+    }
+    deleteQueriesLive(reader);
+    deleteTermsLive(reader);
+    resolveUpdatesDeletes(reader);
+  }
+  
+  /**
+   * Called after a successful flush; applies the doc ids resolved
+   * from buffered update terms to the readers.
+   */
+  private synchronized void applyUpdateDeleteDocIDs() throws IOException {
+    SegmentReader[] readers = getReaders(false);
+    for (SegmentReader reader : readers) {
+      try {
+        if (reader.bufferedDeletes != null) {
+          for (int docId : reader.bufferedDeletes.docIds) {
+            reader.deleteDocument(docId);
+          }
+          // applied; clear so the next flush does not re-delete them
+          reader.bufferedDeletes.docIds.clear();
+        }
+      } finally {
+        readerPool.release(reader);
+      }
+    }
+  }
+  
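+  // Drains and applies every queued delete term for this reader.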
+  private void deleteTermsLive(SegmentReader reader) throws IOException {
+    while (deleteTermLive(reader)) {}
+  }
+  
+  // resolve one queued delete term to its doc ids and delete them,
+  // unsynchronized on the writer
+  private boolean deleteTermLive(SegmentReader reader) throws IOException {
+    Term term = reader.bufferedDeletes.pollTerm();
+    if (term == null) return false;
+    TermDocs docs = reader.termDocs();
+    try {
+      docs.seek(term);
+      while (docs.next()) {
+        int docID = docs.doc();
+        reader.deleteDocument(docID);
+      }
+    } finally {
+      docs.close();
+    }
+    return true;
+  }
+  
+  /**
+   * New buffered deletes can arrive while a merge runs.  They cannot
+   * be applied to the source readers at that point, so the per-reader
+   * queues are folded into a single queue for the merged reader, with
+   * resolved doc ids remapped through docMap.
+   */
+  protected synchronized void mergeBufferedDeletes(MergePolicy.OneMerge merge, SegmentReader mergedReader, int[][] docMap) {
+    if (docMap == null) return;
+    Deletes mergedDeletes = new Deletes();
+    for (int x=0; x < merge.readers.length; x++) {
+      Deletes deletes = merge.readers[x].bufferedDeletes;
+      if (deletes == null) continue;
+      mergedDeletes.addTerms(deletes.termMap.values());
+      mergedDeletes.addQueries(deletes.queryMap.values());
+      mergedDeletes.addUpdateTerms(deletes.updateTermMap.values());
+      int[] docs = docMap[x];
+      for (int docId : deletes.docIds) {
+        int mdocid = docs[docId];
+        if (mdocid != -1) {
+          mergedDeletes.addDocID(mdocid);
+        }
+      }
+    }
+    mergedReader.bufferedDeletes = mergedDeletes;
+  }
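+  // Invoked from commitMerge, which is synchronized on the writer, so
+  // the source readers' queues cannot grow while being folded in.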
+  
+  private void resolveUpdatesDeletes(SegmentReader reader) throws IOException {
+    while (resolveUpdateDeletes(reader)) {}
+  }
+  
+  // resolve one queued update term to doc ids, unsynchronized on the
+  // writer; the ids are applied later under lock
+  private boolean resolveUpdateDeletes(SegmentReader reader) throws IOException {
+    Term term = reader.bufferedDeletes.pollUpdateTerm();
+    if (term == null) return false;
+    TermDocs docs = reader.termDocs();
+    try {
+      docs.seek(term);
+      while (docs.next()) {
+        int docID = docs.doc();
+        reader.bufferedDeletes.addDocID(docID);
+      }
+    } finally {
+      docs.close();
+    }
+    return true;
+  }
+  
+  private void deleteQueriesLive(SegmentReader reader) throws IOException {
+    while (deleteQueryLive(reader)) {}
+  }
+  
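+  // Resolves one queued delete query against the reader and deletes
+  // every matching doc; returns false when the queue is empty.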
+  private boolean deleteQueryLive(SegmentReader reader) throws IOException {
+    Query query = null;
+    synchronized (this) {
+      query = reader.bufferedDeletes.pollQuery();
+    }
+    if (query == null) return false;
+    IndexSearcher searcher = new IndexSearcher(reader);
+    try {
+      Weight weight = query.weight(searcher);
+      Scorer scorer = weight.scorer(reader, true, false);
+      if (scorer != null) {
+        // delete every doc matching the query, not just the first hit
+        int docID;
+        while ((docID = scorer.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+          reader.deleteDocument(docID);
+        }
+      }
+    } finally {
+      searcher.close();
+    }
+    return true;
+  }
+  
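+  /**
+   * Per-reader buffer of delete terms, delete queries, update terms,
+   * and resolved doc ids.  Each map uses the same object as key and
+   * value, so it behaves as an ordered set that supports poll/remove
+   * by the original instance.
+   *
+   * <p>An illustrative (hypothetical) drain loop:
+   * <pre>
+   *   Deletes deletes = new Deletes();
+   *   deletes.addTerms(new Term("id", "42"));
+   *   Term t;
+   *   while ((t = deletes.pollTerm()) != null) {
+   *     // resolve t against a reader and delete the matching docs
+   *   }
+   * </pre>
+   */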
+  public static class Deletes {
+    Map<Term,Term> updateTermMap = new TreeMap<Term,Term>();
+    Map<Term,Term> termMap = new TreeMap<Term,Term>();
+    Map<Query,Query> queryMap = new HashMap<Query,Query>();
+    List<Integer> docIds = new ArrayList<Integer>();
+    
+    public synchronized void addTerms(Collection<Term> terms) {
+      for (Term term : terms) {
+        termMap.put(term, term);
+      }
+    }
+    
+    public synchronized void addQueries(Collection<Query> queries) {
+      for (Query query : queries) {
+        queryMap.put(query, query);
+      }
+    }
+    
+    public synchronized void addUpdateTerms(Collection<Term> terms) {
+      for (Term term : terms) {
+        updateTermMap.put(term, term);
+      }
+    }
+    
+    public synchronized void addUpdateTerm(Term term) {
+      updateTermMap.put(term, term);
+    }
+    
+    // the maps are keyed by their own values, so removing the polled
+    // value also removes its entry
+    private synchronized <T> T remove(Map<T,T> map) {
+      if (!map.isEmpty()) {
+        T obj = map.values().iterator().next();
+        map.remove(obj);
+        return obj;
+      }
+      return null;
+    }
+    
+    public synchronized Term pollUpdateTerm() {
+      return remove(updateTermMap);
+    }
+    
+    public synchronized void addDocID(int docID) {
+      docIds.add(docID);
+    }
+    
+    public synchronized Query pollQuery() {
+      return remove(queryMap);
+    }
+    
+    public synchronized Term pollTerm() {
+      return remove(termMap);
+    }
+    
+    public synchronized void removeUpdateTerm(Term term) {
+      updateTermMap.remove(term);
+    }
+    
+    public synchronized void removeTerm(Term term) {
+      termMap.remove(term);
+    }
+    
+    public synchronized void removeQuery(Query query) {
+      queryMap.remove(query);
+    }
+    
+    public synchronized void addTerms(Term... terms) {
+      for (Term term : terms) {
+        termMap.put(term, term);
+      }
+    }
+    
+    public synchronized void addQueries(Query... queries) {
+      for (Query query : queries) {
+        queryMap.put(query, query);
+      }
+    }
+  }
+  
+  /**
    * Updates a document by first deleting the document(s)
    * containing <code>term</code> and then adding the new
    * document.  The delete and then add are atomic as seen
@@ -2082,7 +2363,23 @@
       boolean doFlush = false;
       boolean success = false;
       try {
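+        // Queue the update term on every live SegmentReader so no
+        // reader misses the update; resolution to doc ids happens
+        // later, unsynchronized on the writer.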
+        synchronized (this) {
+          for (SegmentInfo info : segmentInfos) {
+            SegmentReader reader = readerPool.get(info, false);
+            try {
+              synchronized (reader) {
+                if (reader.bufferedDeletes == null) {
+                  reader.bufferedDeletes = new Deletes();
+                }
+                reader.bufferedDeletes.addUpdateTerm(term);
+              }
+            } finally {
+              readerPool.release(reader);
+            }
+          }
+        }
         doFlush = docWriter.updateDocument(term, doc, analyzer);
+        applyReaderDeletes();
         success = true;
       } finally {
         if (!success) {
@@ -3637,8 +3934,14 @@
 
       if (flushDeletes) {
         flushDeletesCount++;
-        applyDeletes();
+        if (newSegment != null) {
+          applyDeletes(newSegment);
+        }
       }
+      // apply the buffered deletes
+      applyReaderDeletes();
+      // apply the updated doc ids to the readers
+      applyUpdateDeleteDocIDs();
       
       if (flushDocs)
         checkpoint();
@@ -3799,8 +4102,11 @@
     }
 
     final int start = ensureContiguousMerge(merge);
+    
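+    // Resolve any deletes still queued on the source readers, then
+    // fold what remains into the merged reader's queue before
+    // committing the merged deletes.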
+    applyReaderDeletes();
+    mergeBufferedDeletes(merge, mergedReader, merger.getDocMaps());
+    commitMergedDeletes(merge, mergedReader);
 
-    commitMergedDeletes(merge, mergedReader);
     docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);
       
     // Simple optimization: if the doc store we are using
@@ -4017,8 +4323,6 @@
     if (merge.isAborted())
       return;
 
-    applyDeletes();
-
     final SegmentInfos sourceSegments = merge.segments;
     final int end = sourceSegments.size();
 
@@ -4402,14 +4706,16 @@
       mergeExceptions.add(merge);
   }
 
-  // Apply buffered deletes to all segments.
-  private final synchronized boolean applyDeletes() throws CorruptIndexException, IOException {
+  // Apply buffered deletes only to the flushed segment
+  private final synchronized boolean applyDeletes(SegmentInfo info) throws CorruptIndexException, IOException {
     assert testPoint("startApplyDeletes");
+    SegmentInfos applyDeletesInfos = new SegmentInfos();
+    applyDeletesInfos.add(info);
     SegmentInfos rollback = (SegmentInfos) segmentInfos.clone();
     boolean success = false;
     boolean changed;
     try {
-      changed = docWriter.applyDeletes(segmentInfos);
+      changed = docWriter.applyDeletes(applyDeletesInfos);
       success = true;
     } finally {
       if (!success) {
