Index: src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
===================================================================
--- src/test/org/apache/lucene/index/TestPerSegmentDeletes.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestPerSegmentDeletes.java	(revision 0)
@@ -0,0 +1,222 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestPerSegmentDeletes extends LuceneTestCase {
+  /**
+   * Verifies that deletes are buffered against the segments they target
+   * and are applied when those segments are merged.
+   */
+  public void testDeletes1() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
+    iwc.setMergeScheduler(new SerialMergeScheduler());
+    iwc.setMaxBufferedDocs(5000);
+    iwc.setRAMBufferSizeMB(400);
+    FirstAndSecond fsmp = new FirstAndSecond(false);
+    iwc.setMergePolicy(fsmp);
+    IndexWriter writer = new IndexWriter(dir, iwc);
+    for (int x = 0; x < 5; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "1", 2));
+      System.out.println("numRamDocs("+x+")"+writer.numRamDocs());
+    }
+    System.out.println("commit1");
+    writer.commit();
+    assertEquals(1, writer.lastSegmentIndex);
+    assertEquals(1, writer.segmentInfos.size());
+    for (int x = 5; x < 10; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "2", 2));
+      System.out.println("numRamDocs("+x+")"+writer.numRamDocs());
+    }
+    System.out.println("commit2");
+    writer.commit();
+    assertEquals(2, writer.lastSegmentIndex);
+    assertEquals(2, writer.segmentInfos.size());
+    
+    for (int x = 10; x < 15; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "3", 2));
+      System.out.println("numRamDocs("+x+")"+writer.numRamDocs());
+    }
+    System.out.println("flush(false, false, false)");
+    writer.flush(false, false, false);
+    assertEquals(2, writer.lastSegmentIndex);
+    assertEquals(3, writer.segmentInfos.size());
+    
+    // added docs are in the ram buffer
+    for (int x = 15; x < 20; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "4", 2));
+      System.out.println("numRamDocs("+x+")"+writer.numRamDocs());
+    }
+    assertTrue(writer.numRamDocs() > 0);
+    // delete from the ram buffer
+    writer.deleteDocuments(new Term("id", Integer.toString(13)));
+    
+    Term id3 = new Term("id", Integer.toString(3));
+    
+    // delete from the 1st segment
+    writer.deleteDocuments(id3);
+    
+    assertTrue(writer.numRamDocs() > 0);
+    System.out.println("segments:"+writer.segmentInfos);
+    System.out.println("segdels1:"+writer.docWriter.segmentDeletes.toString());
+    
+    assertTrue(writer.docWriter.segmentDeletes.size() > 0);
+    
+    // trigger a merge of the first two segments (via FirstAndSecond)
+    fsmp.doMerge = true;
+    System.out.println("maybeMerge");
+    writer.maybeMerge();
+    
+    System.out.println("segdels1:"+writer.docWriter.segmentDeletes.toString());
+    
+    // there should be docs in RAM
+    assertTrue(writer.numRamDocs() > 0);
+    
+    // assert that the first two segments were merged into one,
+    // leaving the later flushed segment: 2 segments total
+    assertEquals(2, writer.segmentInfos.size());
+    
+    System.out.println("lastSegmentIndex:"+writer.lastSegmentIndex);
+    
+    assertTrue(writer.docWriter.segmentDeletes.size() > 0);
+    
+    IndexReader r = writer.getReader();
+    IndexReader r1 = r.getSequentialSubReaders()[0];
+    printDelDocs(r1.getDeletedDocs());
+    int[] docs = toDocsArray(id3, null, r);
+    // there shouldn't be any docs for id:3
+    assertNull(docs);
+    //System.out.println("id3 docs:"+Arrays.toString(docs));
+    //assertEquals(0, docs.length);
+    r.close();
+    
+    //System.out.println("segdels2:"+writer.docWriter.segmentDeletes.toString());
+    
+    writer.close();
+    dir.close();
+  }
+  
+  public static void printDelDocs(Bits bits) {
+    if (bits == null) return;
+    for (int x=0; x < bits.length(); x++) {
+      System.out.println(x+":"+bits.get(x));
+    }
+  }
+  
+  public static int[] toDocsArray(Term term, Bits bits, IndexReader reader) throws IOException {
+    Fields fields = MultiFields.getFields(reader);
+    Terms cterms = fields.terms(term.field);    
+    TermsEnum ctermsEnum = cterms.iterator();
+    SeekStatus ss = ctermsEnum.seek(new BytesRef(term.text()), false);
+    if (ss.equals(SeekStatus.FOUND)) {
+      DocsEnum docsEnum = ctermsEnum.docs(bits, null);
+      return toArray(docsEnum);
+    }
+    return null;
+  }
+  
+  public static int[] toArray(DocsEnum docsEnum)
+      throws IOException {
+    List<Integer> docs = new ArrayList<Integer>();
+    while (docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+      int docID = docsEnum.docID();
+      docs.add(docID);
+    }
+    return ArrayUtil.toIntArray(docs);
+  }
+  
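+  // Test-only merge policy: when doMerge is set, returns a single merge of
+  // the first two segments, then resets the flag.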
+  public class FirstAndSecond extends MergePolicy {
+    boolean doMerge = false;
+    
+    private final boolean useCompoundFile;
+    
+    private FirstAndSecond(boolean useCompoundFile) {
+      this.useCompoundFile = useCompoundFile;
+    }
+    
+    @Override
+    public void close() {}
+    
+    @Override
+    public MergeSpecification findMerges(SegmentInfos segmentInfos)
+        throws CorruptIndexException, IOException {
+      MergeSpecification ms = new MergeSpecification();
+      if (doMerge) {
+        SegmentInfos mergeInfos = new SegmentInfos();
+        mergeInfos.add(segmentInfos.get(0));
+        mergeInfos.add(segmentInfos.get(1));
+        OneMerge om = new OneMerge(mergeInfos, false);
+        ms.add(om);
+        doMerge = false;
+        return ms;
+      }
+      return null;
+    }
+    
+    @Override
+    public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos,
+        int maxSegmentCount, Set<SegmentInfo> segmentsToOptimize)
+        throws CorruptIndexException, IOException { return null; }
+
+    @Override
+    public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos)
+        throws CorruptIndexException, IOException { return null; }
+    
+    @Override
+    public boolean useCompoundDocStore(SegmentInfos segments) { return useCompoundFile; }
+
+    @Override
+    public boolean useCompoundFile(SegmentInfos segments, SegmentInfo newSegment) { return useCompoundFile; }
+  }
+  
+  /**
+   * Tests that lastSegmentIndex is set from the existing index on IndexWriter init.
+   */
+  public void testInitLastSegmentIndex() throws Exception {
+    Directory dir = newDirectory();
+    
+    Document doc = new Document();
+    DocHelper.setupDoc(doc);
+    DocHelper.writeDoc(dir, doc);
+    
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer()));
+    
+    assertEquals(1, writer.lastSegmentIndex);
+    
+    writer.close();
+    dir.close();
+  }
+}
Index: src/java/org/apache/lucene/index/BufferedDeletes.java
===================================================================
--- src/java/org/apache/lucene/index/BufferedDeletes.java	(revision 1027655)
+++ src/java/org/apache/lucene/index/BufferedDeletes.java	(working copy)
@@ -40,6 +40,7 @@
   List<Integer> docIDs = new ArrayList<Integer>();
   long bytesUsed;
   private final boolean doTermSort;
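+  // The segment these deletes apply to, or null for globally buffered deletes.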
+  SegmentInfo info;
 
   public BufferedDeletes(boolean doTermSort) {
     this.doTermSort = doTermSort;
@@ -50,6 +51,13 @@
     }
   }
 
+  @Override
+  public String toString() {
+    return "BufferedDeletes [numTerms=" + numTerms + ", terms=" + terms
+        + ", queries=" + queries + ", docIDs=" + docIDs + ", bytesUsed="
+        + bytesUsed + ", doTermSort=" + doTermSort + ", info=" + info + "]";
+  }
+
   // Number of documents a delete term applies to.
   final static class Num {
     private int num;
@@ -57,7 +65,11 @@
     Num(int num) {
       this.num = num;
     }
-
+    
+    @Override
+    public String toString() {
+      return Integer.toString(num);
+    }
+    
     int getNum() {
       return num;
     }
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 1027655)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -152,7 +152,11 @@
   private final int maxThreadStates;
 
   List<String> newFiles;
+  
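+  // Deletes buffered per segment, keyed by the SegmentInfo they target;
+  // segDeletesBytesUsed below is added into getRAMUsed() for their RAM cost.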
+  Map<SegmentInfo,BufferedDeletes> segmentDeletes = new HashMap<SegmentInfo,BufferedDeletes>();
 
+  long segDeletesBytesUsed;
+  
   static class DocState {
     DocumentsWriter docWriter;
     Analyzer analyzer;
@@ -284,7 +288,7 @@
 
   // Deletes done before the last flush; these are still
   // kept on abort
-  private BufferedDeletes deletesFlushed = new BufferedDeletes(true);
+  private BufferedDeletes deletesFlushedLastSeg = new BufferedDeletes(true);
 
   // The max number of delete terms that can be buffered before
   // they must be flushed to disk.
@@ -518,7 +522,7 @@
         }
 
         deletesInRAM.clear();
-        deletesFlushed.clear();
+        deletesFlushedLastSeg.clear();
 
         openFiles.clear();
 
@@ -710,8 +714,16 @@
     flushPending = false;
   }
 
+  synchronized void verifySegmentDeletes(int lastSegmentIndex, SegmentInfos infos) {
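+    // Drop buffered deletes for segments below lastSegmentIndex; per-segment
+    // deletes are only tracked for segments at or above the boundary.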
+    IndexWriter.debug2("verifySegmentDeletes lastSegmentIndex:"+lastSegmentIndex+" infos:"+infos);
+    for (int x=0; x < lastSegmentIndex; x++) {
+      SegmentInfo info = infos.get(x);
+      segmentDeletes.remove(info);
+    }
+  }
+  
   synchronized void pushDeletes() {
-    deletesFlushed.update(deletesInRAM);
+    deletesFlushedLastSeg.update(deletesInRAM);
   }
 
   synchronized void close() {
@@ -735,7 +747,7 @@
    * flush is pending.  If delTerm is non-null then we
    * buffer this deleted term after the thread state has
    * been acquired. */
-  synchronized DocumentsWriterThreadState getThreadState(Document doc, Term delTerm) throws IOException {
+  synchronized DocumentsWriterThreadState getThreadState(Document doc, Term delTerm, SegmentInfos subInfos) throws IOException {
 
     final Thread currentThread = Thread.currentThread();
 
@@ -785,7 +797,7 @@
       assert writer.testPoint("DocumentsWriter.ThreadState.init start");
 
       if (delTerm != null) {
-        addDeleteTerm(delTerm, state.docState.docID);
+        addDeleteTerm(delTerm, state.docState.docID, subInfos);
         state.doFlushAfter = timeToFlushDeletes();
       }
 
@@ -822,21 +834,21 @@
 
   /** Returns true if the caller (IndexWriter) should now
    * flush. */
-  boolean addDocument(Document doc, Analyzer analyzer)
+  boolean addDocument(Document doc, Analyzer analyzer, SegmentInfos subInfos)
     throws CorruptIndexException, IOException {
-    return updateDocument(doc, analyzer, null);
+    return updateDocument(doc, analyzer, null, subInfos);
   }
 
-  boolean updateDocument(Term t, Document doc, Analyzer analyzer)
+  boolean updateDocument(Term t, Document doc, Analyzer analyzer, SegmentInfos subInfos)
     throws CorruptIndexException, IOException {
-    return updateDocument(doc, analyzer, t);
+    return updateDocument(doc, analyzer, t, subInfos);
   }
 
-  boolean updateDocument(Document doc, Analyzer analyzer, Term delTerm)
+  boolean updateDocument(Document doc, Analyzer analyzer, Term delTerm, SegmentInfos subInfos)
     throws CorruptIndexException, IOException {
     
     // This call is synchronized but fast
-    final DocumentsWriterThreadState state = getThreadState(doc, delTerm);
+    final DocumentsWriterThreadState state = getThreadState(doc, delTerm, subInfos);
 
     final DocState docState = state.docState;
     docState.doc = doc;
@@ -921,7 +933,11 @@
       return;
     MergeDocIDRemapper mapper = new MergeDocIDRemapper(infos, docMaps, delCounts, merge, mergeDocCount);
     deletesInRAM.remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
-    deletesFlushed.remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
+    
+    for (BufferedDeletes deletes : segmentDeletes.values()) {
+      deletes.remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
+    }
+    deletesFlushedLastSeg.remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
     flushedDocCount -= mapper.docShift;
   }
 
@@ -939,45 +955,45 @@
       throw new AlreadyClosedException("this IndexWriter is closed");
   }
 
-  boolean bufferDeleteTerms(Term[] terms) throws IOException {
+  boolean bufferDeleteTerms(Term[] terms, SegmentInfos subInfos) throws IOException {
     synchronized(this) {
       waitReady(null);
       for (int i = 0; i < terms.length; i++)
-        addDeleteTerm(terms[i], numDocsInRAM);
+        addDeleteTerm(terms[i], numDocsInRAM, subInfos);
     }
     return timeToFlushDeletes();
   }
 
-  boolean bufferDeleteTerm(Term term) throws IOException {
+  boolean bufferDeleteTerm(Term term, SegmentInfos subInfos) throws IOException {
     synchronized(this) {
       waitReady(null);
-      addDeleteTerm(term, numDocsInRAM);
+      addDeleteTerm(term, numDocsInRAM, subInfos);
     }
     return timeToFlushDeletes();
   }
 
-  boolean bufferDeleteQueries(Query[] queries) throws IOException {
+  boolean bufferDeleteQueries(Query[] queries, SegmentInfos subInfos) throws IOException {
     synchronized(this) {
       waitReady(null);
       for (int i = 0; i < queries.length; i++)
-        addDeleteQuery(queries[i], numDocsInRAM);
+        addDeleteQuery(queries[i], numDocsInRAM, subInfos);
     }
     return timeToFlushDeletes();
   }
 
-  boolean bufferDeleteQuery(Query query) throws IOException {
+  boolean bufferDeleteQuery(Query query, SegmentInfos subInfos) throws IOException {
     synchronized(this) {
       waitReady(null);
-      addDeleteQuery(query, numDocsInRAM);
+      addDeleteQuery(query, numDocsInRAM, subInfos);
     }
     return timeToFlushDeletes();
   }
 
   synchronized boolean deletesFull() {
     return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-            (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed + bytesUsed()) >= ramBufferSize) ||
+            (deletesInRAM.bytesUsed + deletesFlushedLastSeg.bytesUsed + bytesUsed()) >= ramBufferSize) ||
       (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-       ((deletesInRAM.size() + deletesFlushed.size()) >= maxBufferedDeleteTerms));
+       ((deletesInRAM.size() + deletesFlushedLastSeg.size()) >= maxBufferedDeleteTerms));
   }
 
   synchronized boolean doApplyDeletes() {
@@ -990,9 +1006,9 @@
     // when merges (which always apply deletes) are
     // infrequent.
     return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-            (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed) >= ramBufferSize/2) ||
+            (deletesInRAM.bytesUsed + deletesFlushedLastSeg.bytesUsed) >= ramBufferSize/2) ||
       (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-       ((deletesInRAM.size() + deletesFlushed.size()) >= maxBufferedDeleteTerms));
+       ((deletesInRAM.size() + deletesFlushedLastSeg.size()) >= maxBufferedDeleteTerms));
   }
 
   private boolean timeToFlushDeletes() {
@@ -1011,20 +1027,20 @@
   }
 
   synchronized boolean hasDeletes() {
-    return deletesFlushed.any();
+    return deletesFlushedLastSeg.any();
   }
-
+  
   synchronized boolean applyDeletes(SegmentInfos infos) throws IOException {
+    IndexWriter.debug2("applyDeletes infos:"+infos);
+    //if (!hasDeletes())
+    //  return false;
 
-    if (!hasDeletes())
-      return false;
-
     final long t0 = System.currentTimeMillis();
 
     if (infoStream != null)
-      message("apply " + deletesFlushed.numTerms + " buffered deleted terms and " +
-              deletesFlushed.docIDs.size() + " deleted docIDs and " +
-              deletesFlushed.queries.size() + " deleted queries on " +
+      message("apply " + deletesFlushedLastSeg.numTerms + " buffered deleted terms and " +
+          deletesFlushedLastSeg.docIDs.size() + " deleted docIDs and " +
+          deletesFlushedLastSeg.queries.size() + " deleted queries on "
               + infos.size() + " segments.");
 
     final int infosEnd = infos.size();
@@ -1032,21 +1048,35 @@
     int docStart = 0;
     boolean any = false;
     for (int i = 0; i < infosEnd; i++) {
-
+      SegmentInfo info = infos.info(i);
+      
       // Make sure we never attempt to apply deletes to
       // segment in external dir
-      assert infos.info(i).dir == directory;
-
-      SegmentReader reader = writer.readerPool.get(infos.info(i), false);
+      assert info.dir == directory;
+      
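+      // Use this segment's own buffered deletes if present, otherwise fall
+      // back to the deletes flushed with the last segment.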
+      BufferedDeletes deletes = segmentDeletes.get(info);
+      if (deletes == null) {
+        deletes = deletesFlushedLastSeg;
+        IndexWriter.debug2("deletesFlushedLastSeg:"+deletesFlushedLastSeg);
+      }
+      //if (i > writer.lastSegmentIndex) {
+      //  assert !segmentDeletes.containsKey(info);
+      //  deletes = deletesFlushedLastSeg;
+      //} else {
+      //  deletes = segmentDeletes.get(info);
+      //}
+      
+      SegmentReader reader = writer.readerPool.get(info, false);
       try {
-        any |= applyDeletes(reader, docStart);
+        any |= applyDeletes(reader, docStart, deletes);
         docStart += reader.maxDoc();
       } finally {
         writer.readerPool.release(reader);
       }
     }
 
-    deletesFlushed.clear();
+    deletesFlushedLastSeg.clear();
+    
     if (infoStream != null) {
       message("apply deletes took " + (System.currentTimeMillis()-t0) + " msec");
     }
@@ -1065,19 +1095,31 @@
     lastDeleteTerm = term;
     return true;
   }
-
+  /**
+  private final synchronized boolean applyDeletesToSegment(SegmentInfo info) throws CorruptIndexException, IOException {
+    SegmentReader reader = null;
+    try {
+      reader = writer.readerPool.get(info, false);
+      BufferedDeletes deletes = segmentDeletes.get(info);
+      int docStart = 0;
+      return applyDeletes(reader, docStart, deletes);
+    } finally {
+      writer.readerPool.release(reader);
+    }
+  }
+  */
   // Apply buffered delete terms, queries and docIDs to the
   // provided reader
-  private final synchronized boolean applyDeletes(IndexReader reader, int docIDStart)
+  private final synchronized boolean applyDeletes(IndexReader reader, int docIDStart, BufferedDeletes deletes)
     throws CorruptIndexException, IOException {
 
     final int docEnd = docIDStart + reader.maxDoc();
     boolean any = false;
 
     assert checkDeleteTerm(null);
-
+    
     // Delete by term
-    if (deletesFlushed.terms.size() > 0) {
+    if (deletes.terms.size() > 0) {
       Fields fields = reader.fields();
       if (fields == null) {
         // This reader has no postings
@@ -1089,7 +1131,7 @@
       String currentField = null;
       DocsEnum docs = null;
         
-      for (Entry<Term, BufferedDeletes.Num> entry: deletesFlushed.terms.entrySet()) {
+      for (Entry<Term, BufferedDeletes.Num> entry: deletes.terms.entrySet()) {
         Term term = entry.getKey();
         // Since we visit terms sorted, we gain performance
         // by re-using the same TermsEnum and seeking only
@@ -1118,9 +1160,11 @@
             int limit = entry.getValue().getNum();
             while (true) {
               final int docID = docs.nextDoc();
+              IndexWriter.debug2("applyDeletes docID:"+docID);
               if (docID == DocsEnum.NO_MORE_DOCS || docIDStart+docID >= limit) {
                 break;
               }
+              IndexWriter.debug2("applyDeletes delete docID:"+docID);
               reader.deleteDocument(docID);
               any = true;
             }
@@ -1130,7 +1174,7 @@
     }
 
     // Delete by docID
-    for (Integer docIdInt : deletesFlushed.docIDs) {
+    for (Integer docIdInt : deletes.docIDs) {
       int docID = docIdInt.intValue();
       if (docID >= docIDStart && docID < docEnd) {
         reader.deleteDocument(docID-docIDStart);
@@ -1139,10 +1183,10 @@
     }
 
     // Delete by query
-    if (deletesFlushed.queries.size() > 0) {
+    if (deletes.queries.size() > 0) {
       IndexSearcher searcher = new IndexSearcher(reader);
       try {
-        for (Entry<Query, Integer> entry : deletesFlushed.queries.entrySet()) {
+        for (Entry<Query, Integer> entry : deletes.queries.entrySet()) {
           Query query = entry.getKey();
           int limit = entry.getValue().intValue();
           Weight weight = query.weight(searcher);
@@ -1168,18 +1212,36 @@
   // current number of documents buffered in ram so that the
   // delete term will be applied to those documents as well
   // as the disk segments.
-  synchronized private void addDeleteTerm(Term term, int docCount) {
-    BufferedDeletes.Num num = deletesInRAM.terms.get(term);
-    final int docIDUpto = flushedDocCount + docCount;
-    if (num == null)
-      deletesInRAM.terms.put(term, new BufferedDeletes.Num(docIDUpto));
-    else
-      num.setNum(docIDUpto);
-    deletesInRAM.numTerms++;
+  synchronized private void addDeleteTerm(Term term, int docCount, SegmentInfos subinfos) {
+    for (BufferedDeletes dels : getBufferedDeletes(subinfos)) {
+      IndexWriter.debug2("addDeleteTerm: info:"+dels.info);
+      BufferedDeletes.Num num = dels.terms.get(term);
+      final int docIDUpto = flushedDocCount + docCount;
+      if (num == null)
+        dels.terms.put(term, new BufferedDeletes.Num(docIDUpto));
+      else
+        num.setNum(docIDUpto);
+      dels.numTerms++;
 
-    deletesInRAM.addBytesUsed(BYTES_PER_DEL_TERM + term.bytes.length);
+      dels.addBytesUsed(BYTES_PER_DEL_TERM + term.bytes.length);
+    }
   }
-
+  
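+  // Returns the global in-RAM deletes plus one BufferedDeletes per given
+  // segment, creating per-segment entries on demand.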
+  synchronized List<BufferedDeletes> getBufferedDeletes(SegmentInfos infos) {
+    List<BufferedDeletes> deletes = new ArrayList<BufferedDeletes>();
+    deletes.add(deletesInRAM);
+    for (SegmentInfo info : infos) {
+      BufferedDeletes segdels = segmentDeletes.get(info);
+      if (segdels == null) {
+        segdels = new BufferedDeletes(false);
+        segdels.info = info;
+        segmentDeletes.put(info, segdels);
+      }
+      deletes.add(segdels);
+    }
+    return deletes;
+  }
+  
   // Buffer a specific docID for deletion.  Currently only
   // used when we hit a exception when adding a document
   synchronized private void addDeleteDocID(int docID) {
@@ -1187,7 +1249,7 @@
     deletesInRAM.addBytesUsed(BYTES_PER_DEL_DOCID);
   }
 
-  synchronized private void addDeleteQuery(Query query, int docID) {
+  synchronized private void addDeleteQuery(Query query, int docID, SegmentInfos subinfos) {
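+    // NOTE: unlike addDeleteTerm, delete-by-query is still buffered only in
+    // deletesInRAM; subinfos is not consulted here yet.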
     deletesInRAM.queries.put(query, Integer.valueOf(flushedDocCount + docID));
     deletesInRAM.addBytesUsed(BYTES_PER_DEL_QUERY);
   }
@@ -1268,7 +1330,7 @@
   final SkipDocWriter skipDocWriter = new SkipDocWriter();
 
   long getRAMUsed() {
-    return bytesUsed() + deletesInRAM.bytesUsed + deletesFlushed.bytesUsed;
+    return bytesUsed() + deletesInRAM.bytesUsed + deletesFlushedLastSeg.bytesUsed + segDeletesBytesUsed;
   }
 
   NumberFormat nf = NumberFormat.getInstance();
@@ -1373,7 +1435,7 @@
         return;
       }
     
-      deletesRAMUsed = deletesInRAM.bytesUsed+deletesFlushed.bytesUsed;
+      deletesRAMUsed = deletesInRAM.bytesUsed+deletesFlushedLastSeg.bytesUsed;
       doBalance = bytesUsed() +deletesRAMUsed >= ramBufferSize;
     }
 
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 1027655)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -257,7 +257,9 @@
   // BufferedIndexInputs created during merging.  See
   // LUCENE-888 for details.
   private final static int MERGE_READ_BUFFER_SIZE = 4096;
-
+  
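+  // Debug output stream used by debug2(); set to null to disable.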
+  public static PrintStream DEBUG2 = System.out;
+  
   // Used for printing messages
   private static Object MESSAGE_ID_LOCK = new Object();
   private static int MESSAGE_ID = 0;
@@ -279,9 +281,9 @@
   volatile SegmentInfos pendingCommit;            // set when a commit is pending (after prepareCommit() & before commit())
   volatile long pendingCommitChangeCount;
 
-  private SegmentInfos segmentInfos = new SegmentInfos();       // the segments
+  SegmentInfos segmentInfos = new SegmentInfos();       // the segments
 
-  private DocumentsWriter docWriter;
+  DocumentsWriter docWriter;
   private IndexFileDeleter deleter;
 
   private Set<SegmentInfo> segmentsToOptimize = new HashSet<SegmentInfo>();           // used by optimize to note those needing optimization
@@ -309,7 +311,7 @@
 
   private int flushCount;
   private int flushDeletesCount;
-
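+  // Boundary into segmentInfos: segments at or after this index were added
+  // since the last commit or completed merge.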
+  int lastSegmentIndex = 0;
   final ReaderPool readerPool = new ReaderPool();
   
   // This is a "write once" variable (like the organic dye
@@ -329,7 +331,7 @@
 
   // The PayloadProcessorProvider to use when segments are merged
   private PayloadProcessorProvider payloadProcessorProvider;
-
+  
   /**
    * Expert: returns a readonly reader, covering all
    * committed as well as un-committed changes to the index.
@@ -408,7 +410,7 @@
     // reader; in theory we could do similar retry logic,
     // just like we do when loading segments_N
     synchronized(this) {
-      applyDeletes();
+      applyAllDeletes();
       final IndexReader r = new DirectoryReader(this, segmentInfos, config.getReaderTermsIndexDivisor(), codecs);
       if (infoStream != null) {
         message("return reader version=" + r.getVersion() + " reader=" + r);
@@ -1108,7 +1110,9 @@
       }
 
       setRollbackSegmentInfos(segmentInfos);
-
+      
+      lastSegmentIndex = segmentInfos.size();
+      
       docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates());
       docWriter.setInfoStream(infoStream);
       docWriter.setMaxFieldLength(maxFieldLength);
@@ -2025,7 +2029,7 @@
     boolean success = false;
     try {
       try {
-        doFlush = docWriter.addDocument(doc, analyzer);
+        doFlush = docWriter.addDocument(doc, analyzer, getLastSegmentInfos());
         success = true;
       } finally {
         if (!success) {
@@ -2065,7 +2069,7 @@
   public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
-      boolean doFlush = docWriter.bufferDeleteTerm(term);
+      boolean doFlush = docWriter.bufferDeleteTerm(term, getLastSegmentInfos());
       if (doFlush)
         flush(true, false, false);
     } catch (OutOfMemoryError oom) {
@@ -2089,7 +2093,7 @@
   public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
-      boolean doFlush = docWriter.bufferDeleteTerms(terms);
+      boolean doFlush = docWriter.bufferDeleteTerms(terms, getLastSegmentInfos());
       if (doFlush)
         flush(true, false, false);
     } catch (OutOfMemoryError oom) {
@@ -2110,7 +2114,7 @@
    */
   public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
     ensureOpen();
-    boolean doFlush = docWriter.bufferDeleteQuery(query);
+    boolean doFlush = docWriter.bufferDeleteQuery(query, getLastSegmentInfos());
     if (doFlush)
       flush(true, false, false);
   }
@@ -2130,7 +2134,7 @@
    */
   public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
     ensureOpen();
-    boolean doFlush = docWriter.bufferDeleteQueries(queries);
+    boolean doFlush = docWriter.bufferDeleteQueries(queries, getLastSegmentInfos());
     if (doFlush)
       flush(true, false, false);
   }
@@ -2182,7 +2186,7 @@
       boolean doFlush = false;
       boolean success = false;
       try {
-        doFlush = docWriter.updateDocument(term, doc, analyzer);
+        doFlush = docWriter.updateDocument(term, doc, analyzer, getLastSegmentInfos());
         success = true;
       } finally {
         if (!success) {
@@ -3120,7 +3124,7 @@
       finishCommit();
     }
   }
-
+  
   /**
    * <p>Commits all pending changes (added & deleted
    * documents, optimizations, segment merges, added
@@ -3155,6 +3159,14 @@
     commit(null);
   }
 
+  synchronized SegmentInfos getLastSegmentInfos() {
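+    // Collect the segments added since the lastSegmentIndex boundary
+    // (i.e. since the last commit or completed merge).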
+    SegmentInfos infos = new SegmentInfos();
+    for (int x=lastSegmentIndex; x < segmentInfos.size(); x++) {
+      infos.add(segmentInfos.get(x));
+    }
+    return infos;
+  }
+  
   /** Commits all changes to the index, specifying a
    *  commitUserData Map (String -> String).  This just
    *  calls {@link #prepareCommit(Map)} (if you didn't
@@ -3204,6 +3216,7 @@
         segmentInfos.setUserData(pendingCommit.getUserData());
         setRollbackSegmentInfos(pendingCommit);
         deleter.checkpoint(pendingCommit, true);
+        lastSegmentIndex = segmentInfos.size();
       } finally {
         // Matches the incRef done in startCommit:
         deleter.decRef(pendingCommit);
@@ -3249,6 +3262,12 @@
       docWriter.clearFlushPending();
     }
   }
+  
+  static void debug2(String msg) {
+    if (DEBUG2 != null) {
+      DEBUG2.println(msg);
+    }
+  }
 
   // TODO: this method should not have to be entirely
   // synchronized, ie, merges should be allowed to commit
@@ -3258,7 +3277,7 @@
     if (hitOOM) {
       throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush");
     }
-
+    debug2("doFlushInternal");
     ensureOpen(false);
 
     assert testPoint("startDoFlush");
@@ -3401,7 +3420,7 @@
       }
 
       if (flushDeletes) {
-        applyDeletes();
+        applyAllDeletes();
       }
       
       if (flushDocs)
@@ -3574,9 +3593,14 @@
     merge.info.setHasProx(merger.hasProx());
 
     segmentInfos.subList(start, start + merge.segments.size()).clear();
+    
     assert !segmentInfos.contains(merge.info);
     segmentInfos.add(start, merge.info);
 
+    lastSegmentIndex = start;
+    
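+    // Discard per-segment deletes buffered for segments below the new boundary.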
+    docWriter.verifySegmentDeletes(lastSegmentIndex, segmentInfos);
+    
     // Must note the change to segmentInfos so any commits
     // in-flight don't lose it:
     checkpoint();
@@ -3767,11 +3791,14 @@
     if (merge.isAborted())
       return;
 
-    applyDeletes();
-
     final SegmentInfos sourceSegments = merge.segments;
     final int end = sourceSegments.size();
-
+    
+    docWriter.pushDeletes();
+    
+    // apply deletes only to the source segments
+    docWriter.applyDeletes(sourceSegments);
+    
     // Check whether this merge will allow us to skip
     // merging the doc stores (stored field & vectors).
     // This is a very substantial optimization (saves tons
@@ -3935,7 +3962,6 @@
       mergingSegments.remove(merge.info);
       merge.registerDone = false;
     }
-
     runningMerges.remove(merge);
   }
 
@@ -3980,7 +4006,7 @@
 
     merge.readers = new SegmentReader[numSegments];
     merge.readersClone = new SegmentReader[numSegments];
-
+    
     boolean mergeDocStores = false;
 
     final Set<String> dss = new HashSet<String>();
@@ -4005,6 +4031,8 @@
         // deletes may come in while we're merging so we
         // need readers that will not change
         SegmentReader clone = merge.readersClone[i] = (SegmentReader) reader.clone(true);
+        
+        debug2("merge dels: "+clone.getSegmentName()+" numdel:"+clone.numDeletedDocs());
         merger.add(clone);
 
         if (clone.hasDeletions()) {
@@ -4223,12 +4251,12 @@
     if (!mergeExceptions.contains(merge) && mergeGen == merge.mergeGen)
       mergeExceptions.add(merge);
   }
-
-  // Apply buffered deletes to all segments.
-  private final synchronized boolean applyDeletes() throws CorruptIndexException, IOException {
-    assert testPoint("startApplyDeletes");
+  
+  // Apply buffered deletes to all segments.
+  private final synchronized boolean applyAllDeletes() throws CorruptIndexException, IOException {
+    assert testPoint("startApplyAllDeletes");
     if (infoStream != null) {
-      message("applyDeletes");
+      message("applyAllDeletes");
     }
     flushDeletesCount++;
     boolean success = false;
@@ -4246,7 +4274,7 @@
       checkpoint();
     return changed;
   }
-
+  
   // For test purposes.
   final synchronized int getBufferedDeleteTermsSize() {
     return docWriter.getBufferedDeleteTerms().size();
