Index: src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentReader.java	(revision 881652)
+++ src/java/org/apache/lucene/index/SegmentReader.java	(working copy)
@@ -53,6 +53,7 @@
   CloseableThreadLocal<TermVectorsReader> termVectorsLocal = new CloseableThreadLocal<TermVectorsReader>();
 
   BitVector deletedDocs = null;
+  BitVector pendingDeletedDocs = null;
   Ref deletedDocsRef = null;
   private boolean deletedDocsDirty = false;
   private boolean normsDirty = false;
@@ -68,7 +69,11 @@
   private Ref singleNormRef;
 
   CoreReaders core;
-
+  
+  public synchronized boolean hasPendingDeletions() {
+    return pendingDeletedDocs != null;
+  }
+  
   // Holds core readers that are shared (unchanged) when
   // SegmentReader is cloned or reopened
   static final class CoreReaders {
@@ -700,6 +705,13 @@
           clone.deletedDocs = deletedDocs;
           clone.deletedDocsRef = deletedDocsRef;
         }
+        // we copy over as is and don't worry about
+        // copy on write because it's used as a queue
+        // of pending deletes (i.e. deletes 
+        // accumulate inside it)
+        if (pendingDeletedDocs != null) {
+          clone.pendingDeletedDocs = pendingDeletedDocs;
+        }
       } else {
         if (!deletionsUpToDate) {
           // load deleted docs
@@ -815,7 +827,14 @@
   static boolean hasSeparateNorms(SegmentInfo si) throws IOException {
     return si.hasSeparateNorms();
   }
-
+  
+  void doPendingDelete(int docNum) {
+    if (pendingDeletedDocs == null) {
+      pendingDeletedDocs = new BitVector(maxDoc());
+    }
+    pendingDeletedDocs.set(docNum);
+  }
+  
   @Override
   protected void doDelete(int docNum) {
     if (deletedDocs == null) {
@@ -878,7 +897,11 @@
     ensureOpen();
     return getFieldsReader().doc(n, fieldSelector);
   }
-
+  
+  public synchronized boolean isPendingDeleted(int n) {
+    return (pendingDeletedDocs != null && pendingDeletedDocs.get(n));
+  }
+  
   @Override
   public synchronized boolean isDeleted(int n) {
     return (deletedDocs != null && deletedDocs.get(n));
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 881652)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -458,6 +458,8 @@
         }
 
         deletesInRAM.clear();
+        
+        writer.clearPendingDeletes();
 
         openFiles.clear();
 
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 881652)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -17,32 +17,37 @@
  * limitations under the License.
  */
 
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DocumentsWriter.IndexingChain;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.store.BufferedIndexInput;
+import org.apache.lucene.util.BitVector;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.ThreadInterruptedException;
 
-import java.io.IOException;
-import java.io.Closeable;
-import java.io.PrintStream;
-import java.util.List;
-import java.util.Collection;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.Iterator;
-import java.util.Map;
-
 /**
   An <code>IndexWriter</code> creates and maintains an index.
 
@@ -1950,8 +1955,103 @@
       handleOOM(oom, "addDocument");
     }
   }
-
+  
   /**
+   * Delete the given term from the pending deleted docs
+   * of each current segment reader.
+   * @param term the term whose matching documents are marked as pending deletes
+   * @throws IOException
+   */
+  private synchronized void deletePending(Term term) throws IOException {
+    for (SegmentInfo info : segmentInfos) {
+      SegmentReader reader = readerPool.get(info, false);
+      try {
+        if (reader.pendingDeletedDocs == null) {
+          reader.pendingDeletedDocs = new BitVector(reader.maxDoc());
+        }
+        TermDocs docs = reader.termDocs();
+        try {
+          docs.seek(term);
+          while (docs.next()) {
+            int docID = docs.doc();
+            reader.pendingDeletedDocs.set(docID);
+          }
+        } finally {
+          docs.close();
+        }
+      } finally {
+        readerPool.release(reader);
+      }
+    }
+  }
+  
+  private synchronized void deleteQueriesLive(Query... queries) throws IOException {
+    for (SegmentInfo info : segmentInfos) {
+      SegmentReader reader = readerPool.get(info, false);
+      try {
+        deleteQueriesLive(reader, queries);
+      } finally {
+        readerPool.release(reader);
+      }
+    }
+  }
+  
+  private synchronized void deleteTermsLive(Term... terms) throws IOException {
+    for (SegmentInfo info : segmentInfos) {
+      SegmentReader reader = readerPool.get(info, false);
+      try {
+        deleteTermsLive(reader, terms);
+      } finally {
+        readerPool.release(reader);
+      }
+    }
+  }
+  
+  void clearPendingDeletes() throws IOException {
+    for (SegmentInfo info : segmentInfos) {
+      SegmentReader reader = readerPool.get(info, false);
+      try {
+        reader.pendingDeletedDocs = null;
+      } finally {
+        readerPool.release(reader);
+      }
+    }
+  }
+  
+  private void deleteQueriesLive(IndexReader reader, Query... queries) throws IOException {
+    IndexSearcher searcher = new IndexSearcher(reader);
+    try {
+      for (Query query : queries) {
+        Weight weight = query.weight(searcher);
+        Scorer scorer = weight.scorer(reader, true, false);
+        if (scorer != null) {
+          int docID;
+          // delete every matching doc; nextDoc() returns NO_MORE_DOCS when exhausted
+          while ((docID = scorer.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+            reader.deleteDocument(docID);
+          }
+        }
+      }
+    } finally {
+      searcher.close();
+    }
+  }
+  
+  protected void deleteTermsLive(IndexReader reader, Term... terms) throws IOException {
+    TermDocs docs = reader.termDocs();
+    try {
+      for (Term term : terms) {
+        docs.seek(term);
+        while (docs.next()) {
+          int docID = docs.doc();
+          reader.deleteDocument(docID);
+        }
+      }
+    } finally {
+      docs.close();
+    }
+  }
+  
+  /**
    * Deletes the document(s) containing <code>term</code>.
    *
    * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
@@ -1965,6 +2065,7 @@
   public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
+      deleteTermsLive(term);
       boolean doFlush = docWriter.bufferDeleteTerm(term);
       if (doFlush)
         flush(true, false, false);
@@ -1989,6 +2090,7 @@
   public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
+      deleteTermsLive(terms);
       boolean doFlush = docWriter.bufferDeleteTerms(terms);
       if (doFlush)
         flush(true, false, false);
@@ -2010,6 +2112,7 @@
    */
   public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
     ensureOpen();
+    deleteQueriesLive(query);
     boolean doFlush = docWriter.bufferDeleteQuery(query);
     if (doFlush)
       flush(true, false, false);
@@ -2030,6 +2133,7 @@
    */
   public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
     ensureOpen();
+    deleteQueriesLive(queries);
     boolean doFlush = docWriter.bufferDeleteQueries(queries);
     if (doFlush)
       flush(true, false, false);
@@ -2082,6 +2186,8 @@
       boolean doFlush = false;
       boolean success = false;
       try {
+        // delete term in SRs' pending deleted docs
+        deletePending(term);
         doFlush = docWriter.updateDocument(term, doc, analyzer);
         success = true;
       } finally {
@@ -3637,7 +3743,14 @@
 
       if (flushDeletes) {
         flushDeletesCount++;
-        applyDeletes();
+        if (newSegment != null) {
+          // the segment successfully flushed
+          // merge the update docs deletes (aka pending dels)
+          // into the segment readers' main deleted docs
+          mergePendingDeletes();
+          // apply buffered deletes to the new segment
+          applyDeletes(newSegment);
+        }
       }
       
       if (flushDocs)
@@ -3655,7 +3768,30 @@
       docWriter.resumeAllThreads();
     }
   }
-
+  
+  /**
+   * Folds each segment reader's pending deletes into its main deleted docs, then clears them.
+   * @throws IOException
+   */
+  synchronized void mergePendingDeletes() throws IOException {
+    for (SegmentInfo info : segmentInfos) {
+      SegmentReader reader = readerPool.get(info, false);
+      try {
+        if (reader.pendingDeletedDocs != null) {
+          int size = reader.pendingDeletedDocs.size();
+          for (int x=0; x < size; x++) {
+            if (reader.pendingDeletedDocs.get(x)) {
+              reader.deleteDocument(x);
+            }
+          }
+          reader.pendingDeletedDocs = null;
+        }
+      } finally {
+        readerPool.release(reader);
+      }
+    }
+  }
+  
   /** Expert:  Return the total size of all index files currently cached in memory.
    * Useful for size management with flushRamDocs()
    */
@@ -3717,7 +3853,8 @@
     // started merging:
     int docUpto = 0;
     int delCount = 0;
-
+    int pendingDelCount = 0;
+    
     for(int i=0; i < sourceSegments.size(); i++) {
       SegmentInfo info = sourceSegments.info(i);
       int docCount = info.docCount;
@@ -3731,11 +3868,17 @@
         // the merge started, we must now carefully keep any
         // newly flushed deletes but mapping them to the new
         // docIDs.
-
-        if (currentReader.numDeletedDocs() > previousReader.numDeletedDocs()) {
+        
+        // if there are pending deletions, then we map them over
+        // to the merged reader
+        if ( (currentReader.numDeletedDocs() > previousReader.numDeletedDocs())
+            || currentReader.hasPendingDeletions()) {
           // This means this segment has had new deletes
           // committed since we started the merge, so we
           // must merge them:
+          
+          // we only merge pending deletes if they're 
+          // not deleted in previous reader
           for(int j=0;j<docCount;j++) {
             if (previousReader.isDeleted(j))
               assert currentReader.isDeleted(j);
@@ -3744,6 +3887,10 @@
                 mergeReader.doDelete(docUpto);
                 delCount++;
               }
+              if (currentReader.isPendingDeleted(j)) {
+                mergeReader.doPendingDelete(docUpto);
+                pendingDelCount++;
+              }
               docUpto++;
             }
           }
@@ -3764,9 +3911,11 @@
         // No deletes before or after
         docUpto += info.docCount;
     }
-
+    
     assert mergeReader.numDeletedDocs() == delCount;
-
+    if (mergeReader.pendingDeletedDocs != null) {
+      assert mergeReader.pendingDeletedDocs.count() == pendingDelCount;
+    }
     mergeReader.hasChanges = delCount >= 0;
   }
 
@@ -4012,8 +4161,6 @@
     if (merge.isAborted())
       return;
 
-    applyDeletes();
-
     final SegmentInfos sourceSegments = merge.segments;
     final int end = sourceSegments.size();
 
@@ -4396,8 +4543,28 @@
     if (!mergeExceptions.contains(merge) && mergeGen == merge.mergeGen)
       mergeExceptions.add(merge);
   }
-
+  
+  private final synchronized boolean applyDeletes(SegmentInfo info) throws CorruptIndexException, IOException {
+    assert testPoint("startApplyDeletes");
+    // deletes are applied only to the single newly flushed segment, so no rollback clone is needed
+    boolean success = false;
+    boolean changed;
+    try {
+      SegmentInfos infos = new SegmentInfos();
+      infos.add(info);
+      changed = docWriter.applyDeletes(infos);
+      success = true;
+    } finally {
+      if (!success) {
+        if (infoStream != null)
+          message("hit exception flushing deletes");
+      }
+    }
+    return changed;
+  }
+  
   // Apply buffered deletes to all segments.
+  /**
   private final synchronized boolean applyDeletes() throws CorruptIndexException, IOException {
     assert testPoint("startApplyDeletes");
     SegmentInfos rollback = (SegmentInfos) segmentInfos.clone();
@@ -4433,7 +4600,7 @@
       checkpoint();
     return changed;
   }
-
+  **/
   // For test purposes.
   final synchronized int getBufferedDeleteTermsSize() {
     return docWriter.getBufferedDeleteTerms().size();
