Index: src/test/org/apache/lucene/index/TestIndexWriterDelete.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterDelete.java	(revision 1030113)
+++ src/test/org/apache/lucene/index/TestIndexWriterDelete.java	(working copy)
@@ -624,7 +624,7 @@
             boolean seen = false;
             StackTraceElement[] trace = new Exception().getStackTrace();
             for (int i = 0; i < trace.length; i++) {
-              if ("applyDeletes".equals(trace[i].getMethodName())) {
+              if ("applyDeletesAll".equals(trace[i].getMethodName())) {
                 seen = true;
                 break;
               }
@@ -632,13 +632,13 @@
             if (!seen) {
               // Only fail once we are no longer in applyDeletes
               failed = true;
-              throw new IOException("fail after applyDeletes");
+              throw new IOException("fail after applyDeletesAll");
             }
           }
           if (!failed) {
             StackTraceElement[] trace = new Exception().getStackTrace();
             for (int i = 0; i < trace.length; i++) {
-              if ("applyDeletes".equals(trace[i].getMethodName())) {
+              if ("applyDeletesAll".equals(trace[i].getMethodName())) {
                 sawMaybe = true;
                 break;
               }
Index: src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
===================================================================
--- src/test/org/apache/lucene/index/TestPerSegmentDeletes.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestPerSegmentDeletes.java	(revision 0)
@@ -0,0 +1,269 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DocumentsWriter.SegmentDeletes;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
+
+public class TestPerSegmentDeletes extends LuceneTestCase {
+  public void testDeletes1() throws Exception {
+    IndexWriter.debug2 = System.out;
+    Directory dir = new MockDirectoryWrapper(new RAMDirectory());
+    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_CURRENT,
+        new MockAnalyzer());
+    iwc.setMergeScheduler(new SerialMergeScheduler());
+    iwc.setMaxBufferedDocs(5000);
+    iwc.setRAMBufferSizeMB(400);
+    RangeMergePolicy fsmp = new RangeMergePolicy(false);
+    iwc.setMergePolicy(fsmp);
+    IndexWriter writer = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    for (int x = 0; x < 5; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "1", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    System.out.println("commit1");
+    writer.commit();
+    //assertEquals(1, writer.docWriter.lastSegmentIndex);
+    assertEquals(1, writer.segmentInfos.size());
+    for (int x = 5; x < 10; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "2", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    System.out.println("commit2");
+    writer.commit();
+    //assertEquals(2, writer.docWriter.lastSegmentIndex);
+    assertEquals(2, writer.segmentInfos.size());
+    
+    for (int x = 10; x < 15; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "3", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    System.out.println("flush(false, false, false)");
+    writer.flush(false, false, false);
+    //assertEquals(2, writer.docWriter.lastSegmentIndex);
+    assertEquals(3, writer.segmentInfos.size());
+    
+    // added docs are in the ram buffer
+    for (int x = 15; x < 20; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "4", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    assertTrue(writer.numRamDocs() > 0);
+    // delete from the ram buffer
+    writer.deleteDocuments(new Term("id", Integer.toString(13)));
+    
+    Term id3 = new Term("id", Integer.toString(3));
+    
+    // delete from the 1st segment
+    writer.deleteDocuments(id3);
+    
+    assertTrue(writer.numRamDocs() > 0);
+    
+    System.out
+        .println("segdels1:" + writer.docWriter.deletesToString());
+    
+    //assertTrue(writer.docWriter.segmentDeletes.size() > 0);
+    
+    // we cause a merge to happen
+    fsmp.doMerge = true;
+    fsmp.start = 0;
+    fsmp.length = 2;
+    System.out.println("maybeMerge "+writer.segmentInfos);
+    
+    SegmentInfo info0 = writer.segmentInfos.get(0);
+    SegmentInfo info1 = writer.segmentInfos.get(1);
+    
+    writer.maybeMerge();
+    System.out.println("maybeMerge after "+writer.segmentInfos);
+    // there should be docs in RAM
+    assertTrue(writer.numRamDocs() > 0);
+    
+    // assert we've merged the 1 and 2 segments
+    // and still have a segment leftover == 2
+    assertEquals(2, writer.segmentInfos.size());
+    assertFalse(segThere(info0, writer.segmentInfos));
+    assertFalse(segThere(info1, writer.segmentInfos));
+    
+    System.out.println("segdels2:" + writer.docWriter.deletesToString());
+    
+    //assertTrue(writer.docWriter.segmentDeletes.size() > 0);
+    
+    IndexReader r = writer.getReader();
+    IndexReader r1 = r.getSequentialSubReaders()[0];
+    printDelDocs(r1.getDeletedDocs());
+    int[] docs = toDocsArray(id3, null, r);
+    System.out.println("id3 docs:"+Arrays.toString(docs));
+    // there shouldn't be any docs for id:3
+    assertTrue(docs == null);
+    r.close();
+    
+    part2(writer, fsmp);
+    
+    // System.out.println("segdels2:"+writer.docWriter.segmentDeletes.toString());
+    System.out.println("close");
+    writer.close();
+    dir.close();
+  }
+  
+  void part2(IndexWriter writer, RangeMergePolicy fsmp) throws Exception {
+    for (int x = 20; x < 25; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "5", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    writer.flush(false, true, false);
+    for (int x = 25; x < 30; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "5", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    writer.flush(false, true, false);
+    
+    System.out.println("infos3:"+writer.segmentInfos);
+    
+    Term delterm = new Term("id", "8");
+    writer.deleteDocuments(delterm);
+    System.out.println("segdels3:" + writer.docWriter.deletesToString());
+    
+    fsmp.doMerge = true;
+    fsmp.start = 1;
+    fsmp.length = 2;
+    writer.maybeMerge();
+    
+    // deletes for info1, the newly created segment from the 
+    // merge should have no deletes because they were applied in
+    // the merge
+    SegmentInfo info1 = writer.segmentInfos.get(1);
+    //assertFalse(exists(info1, writer.docWriter.segmentDeletes));
+    
+    System.out.println("infos4:"+writer.segmentInfos);
+    System.out.println("segdels4:" + writer.docWriter.deletesToString());
+  }
+  
+  static boolean exists(SegmentInfo info, Map<SegmentInfo,SegmentDeletes> map) {
+    if (!map.containsKey(info)) return false;
+    SegmentDeletes dels = map.get(info);
+    return dels.any();
+  }
+  
+  boolean segThere(SegmentInfo info, SegmentInfos infos) {
+    for (SegmentInfo si : infos) {
+      if (si.name.equals(info.name)) return true; 
+    }
+    return false;
+  }
+  
+  public static void printDelDocs(Bits bits) {
+    if (bits == null) return;
+    for (int x = 0; x < bits.length(); x++) {
+      System.out.println(x + ":" + bits.get(x));
+    }
+  }
+  
+  public static int[] toDocsArray(Term term, Bits bits, IndexReader reader)
+      throws IOException {
+    Fields fields = MultiFields.getFields(reader);
+    Terms cterms = fields.terms(term.field);
+    TermsEnum ctermsEnum = cterms.iterator();
+    SeekStatus ss = ctermsEnum.seek(new BytesRef(term.text()), false);
+    if (ss.equals(SeekStatus.FOUND)) {
+      DocsEnum docsEnum = ctermsEnum.docs(bits, null);
+      return toArray(docsEnum);
+    }
+    return null;
+  }
+  
+  public static int[] toArray(DocsEnum docsEnum) throws IOException {
+    List<Integer> docs = new ArrayList<Integer>();
+    while (docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+      int docID = docsEnum.docID();
+      docs.add(docID);
+    }
+    return ArrayUtil.toIntArray(docs);
+  }
+  
+  public class RangeMergePolicy extends MergePolicy {
+    boolean doMerge = false;
+    int start;
+    int length;
+    
+    private final boolean useCompoundFile;
+    
+    private RangeMergePolicy(boolean useCompoundFile) {
+      this.useCompoundFile = useCompoundFile;
+    }
+    
+    @Override
+    public void close() {}
+    
+    public MergeSpecification findMerges(SegmentInfos segmentInfos)
+        throws CorruptIndexException, IOException {
+      MergeSpecification ms = new MergeSpecification();
+      if (doMerge) {
+        SegmentInfos mergeInfos = new SegmentInfos();
+        for (int x=start; x < (start+length); x++) {
+          mergeInfos.add(segmentInfos.get(x));
+        }
+        OneMerge om = new OneMerge(mergeInfos, false);
+        ms.add(om);
+        doMerge = false;
+        return ms;
+      }
+      return null;
+    }
+    
+    @Override
+    public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos,
+        int maxSegmentCount, Set<SegmentInfo> segmentsToOptimize)
+        throws CorruptIndexException, IOException {
+      return null;
+    }
+    
+    @Override
+    public MergeSpecification findMergesToExpungeDeletes(
+        SegmentInfos segmentInfos) throws CorruptIndexException, IOException {
+      return null;
+    }
+    
+    @Override
+    public boolean useCompoundDocStore(SegmentInfos segments) {
+      return useCompoundFile;
+    }
+    
+    @Override
+    public boolean useCompoundFile(SegmentInfos segments, SegmentInfo newSegment) {
+      return useCompoundFile;
+    }
+  }
+}
Index: src/java/org/apache/lucene/index/BufferedDeletes.java
===================================================================
--- src/java/org/apache/lucene/index/BufferedDeletes.java	(revision 1030113)
+++ src/java/org/apache/lucene/index/BufferedDeletes.java	(working copy)
@@ -49,6 +49,13 @@
       terms = new HashMap<Term,Num>();
     }
   }
+  
+  @Override
+  public String toString() {
+    return "BufferedDeletes [numTerms=" + numTerms + ", terms=" + terms
+        + ", queries=" + queries + ", docIDs=" + docIDs + ", bytesUsed="
+        + bytesUsed + ", doTermSort=" + doTermSort + "]";
+  }
 
   // Number of documents a delete term applies to.
   final static class Num {
Index: src/java/org/apache/lucene/index/SegmentInfo.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfo.java	(revision 1030113)
+++ src/java/org/apache/lucene/index/SegmentInfo.java	(working copy)
@@ -17,21 +17,23 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.CodecProvider;
-import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.HashSet;
-import java.util.HashMap;
-import java.util.ArrayList;
 
+import org.apache.lucene.index.DocumentsWriter.SegmentDeletes;
+import org.apache.lucene.index.codecs.Codec;
+import org.apache.lucene.index.codecs.CodecProvider;
+import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+
 /**
  * Information about a segment such as it's name, directory, and files related
  * to the segment.
@@ -43,7 +45,7 @@
   static final int NO = -1;          // e.g. no norms; no deletes;
   static final int YES = 1;          // e.g. have norms; have deletes;
   static final int WITHOUT_GEN = 0;  // a file name that has no GEN in it. 
-
+  public SegmentDeletes deletes = new SegmentDeletes(this);
   public String name;				  // unique name in dir
   public int docCount;				  // number of docs in seg
   public Directory dir;				  // where segment resides
Index: src/java/org/apache/lucene/index/MergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/MergePolicy.java	(revision 1030113)
+++ src/java/org/apache/lucene/index/MergePolicy.java	(working copy)
@@ -17,15 +17,17 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.index.DocumentsWriter.SegmentDeletes;
+import org.apache.lucene.index.MergePolicy.MergeSpecification;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.SetOnce;
 import org.apache.lucene.util.SetOnce.AlreadySetException;
 
-import java.io.IOException;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Set;
-
 /**
  * <p>Expert: a MergePolicy determines the sequence of
  * primitive merge operations to be used for overall merge
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 1030113)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -17,21 +17,26 @@
  * limitations under the License.
  */
 
+import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_MASK;
+import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
+
 import java.io.IOException;
 import java.io.PrintStream;
 import java.text.NumberFormat;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.Map;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.MergePolicy.OneMerge;
 import org.apache.lucene.index.codecs.Codec;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -43,11 +48,9 @@
 import org.apache.lucene.store.RAMFile;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Constants;
+import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.RecyclingByteBlockAllocator;
 import org.apache.lucene.util.ThreadInterruptedException;
-import org.apache.lucene.util.RamUsageEstimator;
-import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_MASK;
-import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
 
 /**
  * This class accepts multiple added documents and directly
@@ -117,7 +120,7 @@
  */
 
 final class DocumentsWriter {
-
+  public static final boolean USE_LAST_SEGMENT = true;
   final AtomicLong bytesUsed = new AtomicLong(0);
   IndexWriter writer;
   Directory directory;
@@ -152,7 +155,136 @@
   private final int maxThreadStates;
 
   List<String> newFiles;
+  SegmentInfo lastSegmentInfo;
+  final AtomicLong segmentDeletesBytesUsed = new AtomicLong(0);
+  private static final Term[] EMPTY_TERM_ARRAY = new Term[0];
+  SegmentInfos segmentInfos;
+  
+  public String deletesToString() {
+    return "lastSegmentInfo:"+lastSegmentInfo+" deletesInRAM:"+deletesInRAM+" deletesFlushed:"+deletesFlushed;
+  }
+  
+  synchronized void setMergedDeletes(SegmentDeletes mergedDeletes, OneMerge merge) {
+    if (USE_LAST_SEGMENT) {
+      // if the merge is subsuming lastSegmentInfo
+      // then reset lastSegmentInfo to be the newly merged segment
+      if (merge.segments.contains(lastSegmentInfo)) {
+        lastSegmentInfo = merge.info;
+        merge.info.deletes.merge(mergedDeletes);
+      }
+    } else {
+      merge.info.deletes.merge(mergedDeletes);
+    }
+  }
+  
+  synchronized SegmentDeletes getSegmentDeletes(SegmentInfo info) {
+    if (USE_LAST_SEGMENT && lastSegmentInfo != null) {
+      assert segmentInfos.contains(lastSegmentInfo);
+      
+      int idx = segmentInfos.indexOf(info);
+      int lastSegIdx = segmentInfos.indexOf(lastSegmentInfo);
+      if (idx <= lastSegIdx) {
+        return lastSegmentInfo.deletes;
+      }
+      return info.deletes;
+    } else {
+      return info.deletes;
+    }
+  }
+  
+  synchronized void setLastSegmentInfo() {
+    if (lastSegmentInfo != null) {
+      // the last segment info should be in the segment infos
+      assert segmentInfos.contains(lastSegmentInfo);
+    }
+    if (segmentInfos.size() > 0) {
+      lastSegmentInfo = segmentInfos.get(segmentInfos.size()-1);
+    } else {
+      lastSegmentInfo = null;
+    }
+  }
+  
+  static class SegmentDeletes {
+    Set<Term> terms = new HashSet<Term>();
+	  Set<Query> queries = new HashSet<Query>();
+	  Set<Term> appliedTerms = new HashSet<Term>();
+	  Set<Query> appliedQueries = new HashSet<Query>();
+	  SegmentInfo info;
+	  long bytesUsed = 0;
+	  
+	  public SegmentDeletes(SegmentInfo info) {
+	    this.info = info;
+	  }
+	  
+	  void applied() {
+	    appliedTerms.addAll(terms);
+	    appliedQueries.addAll(queries);
+	    terms.clear();
+	    queries.clear();
+	  }
+	  
+	  void merge(SegmentDeletes mergeDels) {
+	    addTerms(mergeDels.terms);
+	    addQueries(mergeDels.queries);
+	    appliedTerms.addAll(mergeDels.appliedTerms);
+	    appliedQueries.addAll(mergeDels.appliedQueries);
+	  }
+	  
+	  void addTerms(Collection<Term> ts) {
+	    for (Term t : ts) {
+	      addTerm(t);
+	    }
+	  }
+	  
+	  void addQueries(Collection<Query> qs) {
+      for (Query q : qs) {
+        addQuery(q);
+      }
+    }
+	  
+	  void addTerm(Term term) {
+      if (terms.add(term)) {
+        // account only for the fixed per-entry overhead here; the
+        // term's actual bytes were already counted when the delete
+        // was buffered in deletesInRAM/deletesFlushed
+        bytesUsed += BYTES_PER_DEL_TERM;
+      }
+    }
+	  
+	  void addQuery(Query query) {
+      if (queries.add(query)) {
+        bytesUsed += BYTES_PER_DEL_QUERY;
+      }
+    }
+	  
+	  public void clear(DocumentsWriter dw) {
+	    // remove the bytes used by this deletes object
+	    for (int x=0; x < terms.size(); x++) {
+	      dw.segmentDeletesBytesUsed.addAndGet(-BYTES_PER_DEL_TERM);
+	    }
+	    for (int x=0; x < queries.size(); x++) {
+        dw.segmentDeletesBytesUsed.addAndGet(-BYTES_PER_DEL_QUERY);
+      }
+	    terms.clear();
+	    queries.clear();
+	  }
+	  
+	  public Term[] getSortedTerms() {
+	    Term[] arr = terms.toArray(EMPTY_TERM_ARRAY);
+	    Arrays.sort(arr);
+	    return arr;
+	  }
+	  
+	  @Override
+    public String toString() {
+      return "Deletes [terms=" + terms + ", queries=" + queries + "]";
+    }
 
+	  boolean any() {
+      return terms.size() > 0 || queries.size() > 0;
+	  }
+  }
+  
   static class DocState {
     DocumentsWriter docWriter;
     Analyzer analyzer;
@@ -279,6 +411,7 @@
   final DocConsumer consumer;
 
   // Deletes done after the last flush; these are discarded
+  
   // on abort
   private BufferedDeletes deletesInRAM = new BufferedDeletes(false);
 
@@ -329,6 +462,8 @@
     if (consumer instanceof DocFieldProcessor) {
       docFieldProcessor = (DocFieldProcessor) consumer;
     }
+    pushSegmentInfos(writer.segmentInfos);
+    setLastSegmentInfo();
   }
 
   /** Returns true if any of the fields in the current
@@ -709,7 +844,22 @@
     bufferIsFull = false;
     flushPending = false;
   }
-
+  
+  synchronized void pushSegmentInfos(SegmentInfos segmentInfos) {
+    this.segmentInfos = copy(segmentInfos);
+    if (USE_LAST_SEGMENT && lastSegmentInfo != null) {
+      assert segmentInfos.contains(lastSegmentInfo);
+    }
+  }
+  
+  static SegmentInfos copy(SegmentInfos segmentInfos) {
+    SegmentInfos newInfos = new SegmentInfos();
+    for (SegmentInfo info : segmentInfos) {
+      newInfos.add(info);
+    }
+    return newInfos;
+  }
+  
   synchronized void pushDeletes() {
     deletesFlushed.update(deletesInRAM);
   }
@@ -975,7 +1125,7 @@
 
   synchronized boolean deletesFull() {
     return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-            (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed + bytesUsed()) >= ramBufferSize) ||
+            (segmentDeletesBytesUsed.get()+deletesInRAM.bytesUsed + deletesFlushed.bytesUsed + bytesUsed()) >= ramBufferSize) ||
       (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
        ((deletesInRAM.size() + deletesFlushed.size()) >= maxBufferedDeleteTerms));
   }
@@ -990,7 +1140,7 @@
     // when merges (which always apply deletes) are
     // infrequent.
     return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-            (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed) >= ramBufferSize/2) ||
+            (segmentDeletesBytesUsed.get()+deletesInRAM.bytesUsed + deletesFlushed.bytesUsed) >= ramBufferSize/2) ||
       (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
        ((deletesInRAM.size() + deletesFlushed.size()) >= maxBufferedDeleteTerms));
   }
@@ -1009,16 +1159,92 @@
   int getMaxBufferedDeleteTerms() {
     return maxBufferedDeleteTerms;
   }
-
+  
   synchronized boolean hasDeletes() {
-    return deletesFlushed.any();
+    if (deletesFlushed.any()) return true;
+    for (SegmentInfo info : segmentInfos) {
+      if (info.deletes != null) {
+        if (info.deletes.any()) {
+          return true;
+        }
+      }
+    }
+    return false;
   }
+  
+  /**
+   * Apply deletes from the segment deletes map to individual readers.
+   */
+  synchronized boolean applyDeletesToSegments(int start, int length, MergePolicy.OneMerge merge) throws IOException {
+    IndexWriter.debug2("applyDeletesToSegments start:"+start+" length:"+length);
+    if (!hasDeletes())
+      return false;
+    
+    final long t0 = System.currentTimeMillis();
 
-  synchronized boolean applyDeletes(SegmentInfos infos) throws IOException {
+    if (infoStream != null)
+      message("apply " + deletesFlushed.numTerms + " buffered deleted terms and " +
+              deletesFlushed.docIDs.size() + " deleted docIDs and " +
+              deletesFlushed.queries.size() + " deleted queries on " +
+              length + " segments.");
 
+    boolean any = false;
+    
+    int lastSegmentIndex = -1;
+    if (lastSegmentInfo != null) {
+      assert segmentInfos.contains(lastSegmentInfo);
+    
+      lastSegmentIndex = segmentInfos.indexOf(lastSegmentInfo);
+    }
+    for (int i = start; i < (length+start); i++) {
+      SegmentInfo info = segmentInfos.info(i);
+      // Make sure we never attempt to apply deletes to
+      // segment in external dir
+      assert info.dir == directory;
+      
+      SegmentDeletes deletes = null;
+      if (USE_LAST_SEGMENT && lastSegmentIndex != -1) {
+        if (i <= lastSegmentIndex) {
+          deletes = lastSegmentInfo.deletes;
+        } else {
+          deletes = info.deletes;
+        }
+      } else {
+        deletes = info.deletes;
+      }
+      
+      if (deletes != null) {
+        SegmentReader reader = writer.readerPool.get(info, false);
+        try {
+          any |= applyDeletesToSegment(deletes, reader);
+        } finally {
+          writer.readerPool.release(reader);
+        }
+      }
+    }
+    
+    // remove the deletes that have been applied 
+    // from the segment deletes map
+    for (int i = start; i < (length+start); i++) {
+      SegmentInfo info = segmentInfos.info(i);
+      SegmentDeletes deletes = info.deletes;
+      if (deletes != null) {
+        deletes.applied();
+      }
+    }
+    
+    if (infoStream != null) {
+      message("apply deletes took " + (System.currentTimeMillis()-t0) + " msec");
+    }
+    return any;
+  }
+
+  synchronized boolean applyDeletesAll(SegmentInfos infos) throws IOException {
+    
     if (!hasDeletes())
       return false;
-
+    pushSegmentInfos(infos);
+    
     final long t0 = System.currentTimeMillis();
 
     if (infoStream != null)
@@ -1035,11 +1261,14 @@
 
       // Make sure we never attempt to apply deletes to
       // segment in external dir
-      assert infos.info(i).dir == directory;
+      SegmentInfo info = infos.info(i);
+      assert info.dir == directory;
 
-      SegmentReader reader = writer.readerPool.get(infos.info(i), false);
+      SegmentReader reader = writer.readerPool.get(info, false);
       try {
-        any |= applyDeletes(reader, docStart);
+        SegmentDeletes sd = getSegmentDeletes(info);
+        any |= applyAllDeletesPerReader(reader, docStart, sd);
+        // clear out this segment's deletes
         docStart += reader.maxDoc();
       } finally {
         writer.readerPool.release(reader);
@@ -1047,13 +1276,16 @@
     }
 
     deletesFlushed.clear();
+    
+    setLastSegmentInfo();
+    
     if (infoStream != null) {
       message("apply deletes took " + (System.currentTimeMillis()-t0) + " msec");
     }
-
+    
     return any;
   }
-
+  
   // used only by assert
   private Term lastDeleteTerm;
 
@@ -1065,10 +1297,106 @@
     lastDeleteTerm = term;
     return true;
   }
+  
+  synchronized int getLastSegmentIndex(SegmentInfos infos) {
+    if (USE_LAST_SEGMENT) {
+      if (this.lastSegmentInfo == null) {
+        return 0;
+      }
+      return infos.indexOf(lastSegmentInfo);
+    } else {
+      return 0;
+    }
+  }
+  
+  private final synchronized boolean applyDeletesToSegment(SegmentDeletes deletes, SegmentReader reader) throws CorruptIndexException, IOException {
+    IndexWriter.debug2("applyDeletesToSegment seg:"+reader.getSegmentName());
+    boolean any = false;
 
+    assert checkDeleteTerm(null);
+
+    // Delete by term
+    if (deletes.terms.size() > 0) {
+      Fields fields = reader.fields();
+      if (fields == null) {
+        // This reader has no postings
+        return false;
+      }
+
+      TermsEnum termsEnum = null;
+      
+      String currentField = null;
+      DocsEnum docs = null;
+      
+      for (Term term : deletes.getSortedTerms()) {
+        // Since we visit terms sorted, we gain performance
+        // by re-using the same TermsEnum and seeking only
+        // forwards
+        if (term.field() != currentField) {
+          assert currentField == null || currentField.compareTo(term.field()) < 0;
+          currentField = term.field();
+          Terms terms = fields.terms(currentField);
+          if (terms != null) {
+            termsEnum = terms.iterator();
+          } else {
+            termsEnum = null;
+          }
+        }
+        
+        if (termsEnum == null) {
+          continue;
+        }
+        assert checkDeleteTerm(term);
+        
+        if (termsEnum.seek(term.bytes(), false) == TermsEnum.SeekStatus.FOUND) {
+          DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs);
+          
+          if (docsEnum != null) {
+            docs = docsEnum;
+            while (true) {
+              final int docID = docs.nextDoc();
+              if (docID == DocsEnum.NO_MORE_DOCS) {
+                break;
+              }
+              reader.deleteDocument(docID);
+              any = true;
+            }
+          }
+        }
+      }
+    }
+
+    // Delete by query
+    if (deletes.queries.size() > 0) {
+      IndexSearcher searcher = new IndexSearcher(reader);
+      try {
+        for (Query query : deletes.queries) {
+          Weight weight = query.weight(searcher);
+          Scorer scorer = weight.scorer(reader, true, false);
+          if (scorer != null) {
+            while(true)  {
+              int doc = scorer.nextDoc();
+              if (doc == DocsEnum.NO_MORE_DOCS) {
+                break;
+              }
+              reader.deleteDocument(doc);
+              any = true;
+            }
+          }
+        }
+      } finally {
+        searcher.close();
+      }
+    }
+    return any;
+  }
+  
   // Apply buffered delete terms, queries and docIDs to the
   // provided reader
-  private final synchronized boolean applyDeletes(IndexReader reader, int docIDStart)
+  // apply deletes only if they also exist in the segment deletes object
+  // or the reader is newly flushed
+  private final synchronized boolean applyAllDeletesPerReader(IndexReader reader, int docIDStart, 
+      SegmentDeletes segmentDeletes)
     throws CorruptIndexException, IOException {
 
     final int docEnd = docIDStart + reader.maxDoc();
@@ -1091,6 +1419,14 @@
         
       for (Entry<Term, BufferedDeletes.Num> entry: deletesFlushed.terms.entrySet()) {
         Term term = entry.getKey();
+        
+        if (segmentDeletes != null 
+          && segmentDeletes.appliedTerms != null
+          && segmentDeletes.appliedTerms.contains(term)) {
+          // this term was already applied to this segment (e.g. as
+          // part of a merge), so don't apply it a second time
+          continue;
+        }
         // Since we visit terms sorted, we gain performance
         // by re-using the same TermsEnum and seeking only
         // forwards
@@ -1144,6 +1480,15 @@
       try {
         for (Entry<Query, Integer> entry : deletesFlushed.queries.entrySet()) {
           Query query = entry.getKey();
+          
+          if (segmentDeletes != null 
+            && segmentDeletes.appliedQueries != null
+            && segmentDeletes.appliedQueries.contains(query)) {
+            // this query was already applied to this segment (e.g. as
+            // part of a merge), so don't apply it a second time
+            continue;
+          }
+          
           int limit = entry.getValue().intValue();
           Weight weight = query.weight(searcher);
           Scorer scorer = weight.scorer(reader, true, false);
@@ -1178,7 +1523,25 @@
     deletesInRAM.numTerms++;
 
     deletesInRAM.addBytesUsed(BYTES_PER_DEL_TERM + term.bytes.length);
+    
+    if (segmentInfos.size() > 0) {      
+      int start = getLastSegmentIndex(segmentInfos);
+      
+      for (int x=start; x < segmentInfos.size(); x++) {
+        SegmentInfo info = segmentInfos.get(x);
+        addDeleteTermToSegment(info, term);
+      }
+    }
   }
+  
+  synchronized void addDeleteTermToSegment(SegmentInfo info, Term term) {
+    if (info.deletes == null) {
+      info.deletes = new SegmentDeletes(info);
+    }
+    if (!info.deletes.appliedTerms.contains(term)) {
+      info.deletes.addTerm(term);
+    }
+  }
 
   // Buffer a specific docID for deletion.  Currently only
   // used when we hit a exception when adding a document
@@ -1190,6 +1553,19 @@
   synchronized private void addDeleteQuery(Query query, int docID) {
     deletesInRAM.queries.put(query, Integer.valueOf(flushedDocCount + docID));
     deletesInRAM.addBytesUsed(BYTES_PER_DEL_QUERY);
+    
+    if (segmentInfos.size() > 0) {    
+      // NOTE(review): comment said "segments after lastSegmentIndex", but this loop starts at 0 and visits every segment — confirm whether it should start at getLastSegmentIndex(segmentInfos) like addDeleteTerm does
+      for (int x=0; x < segmentInfos.size(); x++) {
+        SegmentInfo info = segmentInfos.get(x);
+        if (info.deletes == null) {
+          info.deletes = new SegmentDeletes(info);
+        }
+        if (!info.deletes.appliedQueries.contains(query)) {
+          info.deletes.queries.add(query);
+        }
+      }
+    }
   }
 
   /** Does the synchronized work to finish/flush the
@@ -1373,7 +1749,7 @@
         return;
       }
     
-      deletesRAMUsed = deletesInRAM.bytesUsed+deletesFlushed.bytesUsed;
+      deletesRAMUsed = deletesInRAM.bytesUsed+deletesFlushed.bytesUsed+segmentDeletesBytesUsed.get();
       doBalance = bytesUsed() +deletesRAMUsed >= ramBufferSize;
     }
 
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 1030113)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DocumentsWriter.SegmentDeletes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.PayloadProcessorProvider.DirPayloadProcessor;
 import org.apache.lucene.search.Similarity;
@@ -279,9 +280,9 @@
   volatile SegmentInfos pendingCommit;            // set when a commit is pending (after prepareCommit() & before commit())
   volatile long pendingCommitChangeCount;
 
-  private SegmentInfos segmentInfos = new SegmentInfos();       // the segments
+  SegmentInfos segmentInfos = new SegmentInfos();       // the segments
 
-  private DocumentsWriter docWriter;
+  DocumentsWriter docWriter;
   private IndexFileDeleter deleter;
 
   private Set<SegmentInfo> segmentsToOptimize = new HashSet<SegmentInfo>();           // used by optimize to note those needing optimization
@@ -329,7 +330,14 @@
 
   // The PayloadProcessorProvider to use when segments are merged
   private PayloadProcessorProvider payloadProcessorProvider;
-
+  
+  static PrintStream debug2 = null;
+  static void debug2(String msg) {
+    if (debug2 != null) {
+      debug2.println(msg);
+    }
+  }
+  
   /**
    * Expert: returns a readonly reader, covering all
    * committed as well as un-committed changes to the index.
@@ -401,21 +409,20 @@
     // obtained during this flush are pooled, the first time
     // this method is called:
     poolReaders = true;
-
+    
     flush(true, true, false);
 
     // Prevent segmentInfos from changing while opening the
     // reader; in theory we could do similar retry logic,
     // just like we do when loading segments_N
     synchronized(this) {
-      applyDeletes();
+      applyDeletesAll();
       final IndexReader r = new DirectoryReader(this, segmentInfos, config.getReaderTermsIndexDivisor(), codecs);
       if (infoStream != null) {
         message("return reader version=" + r.getVersion() + " reader=" + r);
       }
       return r;
     }
-
   }
 
   /** Holds shared SegmentReader instances. IndexWriter uses
@@ -1106,13 +1113,11 @@
             message("init: loaded commit \"" + commit.getSegmentsFileName() + "\"");
         }
       }
-
       setRollbackSegmentInfos(segmentInfos);
 
       docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates());
       docWriter.setInfoStream(infoStream);
       docWriter.setMaxFieldLength(maxFieldLength);
-
       // Default deleter (for backwards compatibility) is
       // KeepOnlyLastCommitDeleter:
       deleter = new IndexFileDeleter(directory,
@@ -2001,8 +2006,21 @@
   public void addDocument(Document doc) throws CorruptIndexException, IOException {
     addDocument(doc, analyzer);
   }
-
+  
   /**
+  synchronized SegmentInfos getSegmentInfos(boolean copy) {
+    if (copy) {
+      SegmentInfos copiedInfos = new SegmentInfos();
+      for (SegmentInfo info : segmentInfos) {
+        copiedInfos.add(info);
+      }
+      return copiedInfos;
+    } else {
+      return segmentInfos;
+    }
+  }
+  **/
+  /**
    * Adds a document to this index, using the provided analyzer instead of the
    * value of {@link #getAnalyzer()}.  If the document contains more than
    * {@link #setMaxFieldLength(int)} terms for a given field, the remainder are
@@ -2063,6 +2081,7 @@
    * @throws IOException if there is a low-level IO error
    */
   public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
+    debug2("deleteDocuments term:"+term);
     ensureOpen();
     try {
       boolean doFlush = docWriter.bufferDeleteTerm(term);
@@ -2109,6 +2128,7 @@
    * @throws IOException if there is a low-level IO error
    */
   public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
+    debug2("deleteDocuments query:"+query);
     ensureOpen();
     boolean doFlush = docWriter.bufferDeleteQuery(query);
     if (doFlush)
@@ -3219,7 +3239,7 @@
   }
 
   /**
-   * Flush all in-memory buffered udpates (adds and deletes)
+   * Flush all in-memory buffered updates (adds and deletes)
    * to the Directory.
    * @param triggerMerge if true, we may merge segments (if
    *  deletes or docs were flushed) if necessary
@@ -3380,6 +3400,7 @@
       if (flushDocs) {
         segmentInfos.add(newSegment);
         checkpoint();
+        docWriter.pushSegmentInfos(segmentInfos);
       }
 
       if (flushDocs && mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
@@ -3401,7 +3422,7 @@
       }
 
       if (flushDeletes) {
-        applyDeletes();
+        applyDeletesAll();
       }
       
       if (flushDocs)
@@ -3568,19 +3589,33 @@
     final int start = ensureContiguousMerge(merge);
 
     commitMergedDeletes(merge, mergedReader);
-    docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);
+    
+    synchronized (docWriter) {
+      docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);
+    
+      setMergeDocStoreIsCompoundFile(merge);
+      merge.info.setHasProx(merger.hasProx());
+    
+      segmentInfos.subList(start, start + merge.segments.size()).clear();
+      assert !segmentInfos.contains(merge.info);
+      segmentInfos.add(start, merge.info);
+        
+      // Must note the change to segmentInfos so any commits
+      // in-flight don't lose it:
+      checkpoint();
       
-    setMergeDocStoreIsCompoundFile(merge);
-    merge.info.setHasProx(merger.hasProx());
-
-    segmentInfos.subList(start, start + merge.segments.size()).clear();
-    assert !segmentInfos.contains(merge.info);
-    segmentInfos.add(start, merge.info);
-
-    // Must note the change to segmentInfos so any commits
-    // in-flight don't lose it:
-    checkpoint();
-
+      // merge the pending segment deletes, if there are any
+      SegmentDeletes mergedDeletes = new SegmentDeletes(merge.info);
+      for (SegmentInfo info : merge.segments) {
+        SegmentDeletes sd = docWriter.getSegmentDeletes(info);
+        if (sd != null) {
+          mergedDeletes.merge(sd);
+        }
+      }
+      docWriter.setMergedDeletes(mergedDeletes, merge);
+      docWriter.pushSegmentInfos(segmentInfos);
+    }
+    
     // If the merged segments had pending changes, clear
     // them so that they don't bother writing them to
     // disk, updating SegmentInfo, etc.:
@@ -3683,6 +3718,7 @@
 
   /** Hook that's called when the specified merge is complete. */
   void mergeSuccess(MergePolicy.OneMerge merge) {
+    
   }
   
   /** Checks whether this merge involves any segments
@@ -3767,11 +3803,14 @@
     if (merge.isAborted())
       return;
 
-    applyDeletes();
-
     final SegmentInfos sourceSegments = merge.segments;
+    
+    int start = ensureContiguousMerge(merge);
+    
     final int end = sourceSegments.size();
-
+        
+    docWriter.applyDeletesToSegments(start, end, merge);
+        
     // Check whether this merge will allow us to skip
     // merging the doc stores (stored field & vectors).
     // This is a very substantial optimization (saves tons
@@ -3935,7 +3974,7 @@
       mergingSegments.remove(merge.info);
       merge.registerDone = false;
     }
-
+    
     runningMerges.remove(merge);
   }
 
@@ -4225,16 +4264,16 @@
   }
 
   // Apply buffered deletes to all segments.
-  private final synchronized boolean applyDeletes() throws CorruptIndexException, IOException {
+  private final synchronized boolean applyDeletesAll() throws CorruptIndexException, IOException {
     assert testPoint("startApplyDeletes");
     if (infoStream != null) {
       message("applyDeletes");
     }
     flushDeletesCount++;
     boolean success = false;
-    boolean changed;
+    boolean changed = false;
     try {
-      changed = docWriter.applyDeletes(segmentInfos);
+      changed = docWriter.applyDeletesAll(segmentInfos);
       success = true;
     } finally {
       if (!success && infoStream != null) {
