Index: src/test/org/apache/lucene/index/TestStressIndexing2.java
===================================================================
--- src/test/org/apache/lucene/index/TestStressIndexing2.java	(revision 1039978)
+++ src/test/org/apache/lucene/index/TestStressIndexing2.java	(working copy)
@@ -91,6 +91,18 @@
     dir2.close();
   }
 
+  public void testMultiConfigMany() throws Throwable {
+    for (int x=0; x < 500; x++) {
+      try {
+        testMultiConfig();
+      } catch (Throwable th) {
+        System.err.println("failed on iteration " + x);
+        //th.printStackTrace();
+        throw th;
+      }
+    }
+  }
+  
   public void testMultiConfig() throws Throwable {
     // test lots of smaller different params together
 
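A note on testMultiConfigMany: it simply re-runs the randomized testMultiConfig 500 times so that seed-dependent failures in the per-segment deletes code surface reliably, printing the failing iteration before rethrowing. A variant sketch that makes the loop count tunable from the command line; the "test.iters" property name is invented for illustration, not an existing Lucene knob:

    public void testMultiConfigMany() throws Throwable {
      // -Dtest.iters=N overrides the default of 500 (hypothetical property)
      final int iters = Integer.getInteger("test.iters", 500);
      for (int i = 0; i < iters; i++) {
        try {
          testMultiConfig();
        } catch (Throwable th) {
          System.err.println("failed on iteration " + i);
          throw th;
        }
      }
    }
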
Index: src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
===================================================================
--- src/test/org/apache/lucene/index/TestPerSegmentDeletes.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestPerSegmentDeletes.java	(revision 0)
@@ -0,0 +1,302 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
+
+public class TestPerSegmentDeletes extends LuceneTestCase {
+  public void testDeletes1() throws Exception {
+    //IndexWriter.debug2 = System.out;
+    Directory dir = new MockDirectoryWrapper(new Random(), new RAMDirectory());
+    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_CURRENT,
+        new MockAnalyzer());
+    iwc.setMergeScheduler(new SerialMergeScheduler());
+    iwc.setMaxBufferedDocs(5000);
+    iwc.setRAMBufferSizeMB(100);
+    RangeMergePolicy fsmp = new RangeMergePolicy(false);
+    iwc.setMergePolicy(fsmp);
+    IndexWriter writer = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    for (int x = 0; x < 5; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "1", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    System.out.println("commit1");
+    writer.commit();
+    assertEquals(1, writer.segmentInfos.size());
+    for (int x = 5; x < 10; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "2", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    System.out.println("commit2");
+    writer.commit();
+    assertEquals(2, writer.segmentInfos.size());
+
+    for (int x = 10; x < 15; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "3", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    
+    writer.deleteDocuments(new Term("id", "1"));
+    
+    writer.deleteDocuments(new Term("id", "11"));
+    
+    // flushing without applying deletes means 
+    // there will still be deletes in the segment infos
+    writer.flush(false, false, false);
+    assertTrue(writer.segmentDeletes.hasDeletes());
+    
+    // getReader flushes the pending deletes,
+    // so there should not be any more
+    IndexReader r1 = writer.getReader();
+    assertFalse(writer.segmentDeletes.hasDeletes());
+    r1.close();
+    
+    // delete id:2 from the first segment,
+    // then merge segments 0 and 1,
+    // which should apply the pending delete of id:2
+    writer.deleteDocuments(new Term("id", "2"));
+    writer.flush(false, false, false);
+    fsmp.doMerge = true;
+    fsmp.start = 0;
+    fsmp.length = 2;
+    writer.maybeMerge();
+    
+    assertEquals(2, writer.segmentInfos.size());
+    
+    // id:2 should no longer exist because the
+    // delete was applied during the merge
+    IndexReader r2 = writer.getReader();
+    int[] id2docs = toDocsArray(new Term("id", "2"), null, r2);
+    assertTrue(id2docs == null);
+    r2.close();
+    
+    /**
+    // added docs are in the ram buffer
+    for (int x = 15; x < 20; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "4", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    assertTrue(writer.numRamDocs() > 0);
+    // delete from the ram buffer
+    writer.deleteDocuments(new Term("id", Integer.toString(13)));
+    
+    Term id3 = new Term("id", Integer.toString(3));
+    
+    // delete from the 1st segment
+    writer.deleteDocuments(id3);
+    
+    assertTrue(writer.numRamDocs() > 0);
+    
+    //System.out
+    //    .println("segdels1:" + writer.docWriter.deletesToString());
+    
+    //assertTrue(writer.docWriter.segmentDeletes.size() > 0);
+    
+    // we cause a merge to happen
+    fsmp.doMerge = true;
+    fsmp.start = 0;
+    fsmp.length = 2;
+    System.out.println("maybeMerge "+writer.segmentInfos);
+    
+    SegmentInfo info0 = writer.segmentInfos.get(0);
+    SegmentInfo info1 = writer.segmentInfos.get(1);
+    
+    writer.maybeMerge();
+    System.out.println("maybeMerge after "+writer.segmentInfos);
+    // there should be docs in RAM
+    assertTrue(writer.numRamDocs() > 0);
+    
+    // assert that segments 0 and 1 have been
+    // merged away and that two segments remain
+    assertEquals(2, writer.segmentInfos.size());
+    assertFalse(segThere(info0, writer.segmentInfos));
+    assertFalse(segThere(info1, writer.segmentInfos));
+    
+    //System.out.println("segdels2:" + writer.docWriter.deletesToString());
+    
+    //assertTrue(writer.docWriter.segmentDeletes.size() > 0);
+    
+    IndexReader r = writer.getReader();
+    IndexReader r1 = r.getSequentialSubReaders()[0];
+    printDelDocs(r1.getDeletedDocs());
+    int[] docs = toDocsArray(id3, null, r);
+    System.out.println("id3 docs:"+Arrays.toString(docs));
+    // there shouldn't be any docs for id:3
+    assertTrue(docs == null);
+    r.close();
+    
+    part2(writer, fsmp);
+    **/
+    // System.out.println("segdels2:"+writer.docWriter.segmentDeletes.toString());
+    System.out.println("close");
+    writer.close();
+    dir.close();
+  }
+  
+  /**
+  static boolean hasPendingDeletes(SegmentInfos infos) {
+    for (SegmentInfo info : infos) {
+      if (info.deletes.any()) {
+        return true;
+      }
+    }
+    return false;
+  }
+  **/
+  void part2(IndexWriter writer, RangeMergePolicy fsmp) throws Exception {
+    for (int x = 20; x < 25; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "5", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    writer.flush(false, true, false);
+    for (int x = 25; x < 30; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "5", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    writer.flush(false, true, false);
+    
+    System.out.println("infos3:"+writer.segmentInfos);
+    
+    Term delterm = new Term("id", "8");
+    writer.deleteDocuments(delterm);
+    //System.out.println("segdels3:" + writer.docWriter.deletesToString());
+    
+    fsmp.doMerge = true;
+    fsmp.start = 1;
+    fsmp.length = 2;
+    writer.maybeMerge();
+    
+    // info1, the segment newly created by the merge,
+    // should have no pending deletes because they were
+    // applied during the merge
+    SegmentInfo info1 = writer.segmentInfos.get(1);
+    //assertFalse(exists(info1, writer.docWriter.segmentDeletes));
+    
+    System.out.println("infos4:"+writer.segmentInfos);
+    //System.out.println("segdels4:" + writer.docWriter.deletesToString());
+  }
+  
+  boolean segThere(SegmentInfo info, SegmentInfos infos) {
+    for (SegmentInfo si : infos) {
+      if (si.name.equals(info.name)) return true; 
+    }
+    return false;
+  }
+  
+  public static void printDelDocs(Bits bits) {
+    if (bits == null) return;
+    for (int x = 0; x < bits.length(); x++) {
+      System.out.println(x + ":" + bits.get(x));
+    }
+  }
+  
+  public static int[] toDocsArray(Term term, Bits bits, IndexReader reader)
+      throws IOException {
+    Fields fields = MultiFields.getFields(reader);
+    Terms cterms = fields.terms(term.field);
+    TermsEnum ctermsEnum = cterms.iterator();
+    SeekStatus ss = ctermsEnum.seek(new BytesRef(term.text()), false);
+    if (ss.equals(SeekStatus.FOUND)) {
+      DocsEnum docsEnum = ctermsEnum.docs(bits, null);
+      return toArray(docsEnum);
+    }
+    return null;
+  }
+  
+  public static int[] toArray(DocsEnum docsEnum) throws IOException {
+    List<Integer> docs = new ArrayList<Integer>();
+    while (docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+      int docID = docsEnum.docID();
+      docs.add(docID);
+    }
+    return ArrayUtil.toIntArray(docs);
+  }
+  
+  public class RangeMergePolicy extends MergePolicy {
+    boolean doMerge = false;
+    int start;
+    int length;
+    
+    private final boolean useCompoundFile;
+    
+    private RangeMergePolicy(boolean useCompoundFile) {
+      this.useCompoundFile = useCompoundFile;
+    }
+    
+    @Override
+    public void close() {}
+    
+    public MergeSpecification findMerges(SegmentInfos segmentInfos)
+        throws CorruptIndexException, IOException {
+      MergeSpecification ms = new MergeSpecification();
+      if (doMerge) {
+        SegmentInfos mergeInfos = new SegmentInfos();
+        for (int x=start; x < (start+length); x++) {
+          mergeInfos.add(segmentInfos.get(x));
+        }
+        OneMerge om = new OneMerge(mergeInfos, false);
+        ms.add(om);
+        doMerge = false;
+        return ms;
+      }
+      return null;
+    }
+    
+    @Override
+    public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos,
+        int maxSegmentCount, Set<SegmentInfo> segmentsToOptimize)
+        throws CorruptIndexException, IOException {
+      return null;
+    }
+    
+    @Override
+    public MergeSpecification findMergesToExpungeDeletes(
+        SegmentInfos segmentInfos) throws CorruptIndexException, IOException {
+      return null;
+    }
+    
+    @Override
+    public boolean useCompoundDocStore(SegmentInfos segments) {
+      return useCompoundFile;
+    }
+    
+    @Override
+    public boolean useCompoundFile(SegmentInfos segments, SegmentInfo newSegment) {
+      return useCompoundFile;
+    }
+  }
+}
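RangeMergePolicy is a test-only hook: findMerges returns a single OneMerge covering segments [start, start+length) whenever doMerge is set, then clears the flag, so each maybeMerge() call performs exactly one forced merge and background merging never interferes. Since merges always apply pending deletes, one way to tighten testDeletes1 might be to assert that the merged segment starts out with nothing buffered against it. This is a sketch against the patch's package-private API (usable from tests in org.apache.lucene.index), not code in the patch, and whether the map is already empty here depends on when applyDeletesAll ran:

    // inside testDeletes1, after the forced merge of segments 0 and 1
    SegmentInfo merged = writer.segmentInfos.info(0);
    assertFalse(writer.segmentDeletes.getDeletes(merged).any());
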
Index: src/java/org/apache/lucene/index/BufferedDeletes.java
===================================================================
--- src/java/org/apache/lucene/index/BufferedDeletes.java	(revision 1039978)
+++ src/java/org/apache/lucene/index/BufferedDeletes.java	(working copy)
@@ -38,7 +38,7 @@
   Map<Term,Num> terms;
   Map<Query,Integer> queries = new HashMap<Query,Integer>();
   List<Integer> docIDs = new ArrayList<Integer>();
-  long bytesUsed;
+  int bytesUsed;
   private final boolean doTermSort;
 
   public BufferedDeletes(boolean doTermSort) {
@@ -50,6 +50,13 @@
     }
   }
 
+  @Override
+  public String toString() {
+    return "BufferedDeletes [numTerms=" + numTerms + ", terms=" + terms
+        + ", queries=" + queries + ", docIDs=" + docIDs + ", bytesUsed="
+        + bytesUsed + ", doTermSort=" + doTermSort + "]";
+  }
+
   // Number of documents a delete term applies to.
   final static class Num {
     private int num;
@@ -58,6 +65,11 @@
       this.num = num;
     }
 
+    @Override
+    public String toString() {
+      return "Num [num=" + num + "]";
+    }
+
     int getNum() {
       return num;
     }
@@ -81,14 +93,23 @@
     // dup'd terms are counted towards that 1000
     return numTerms + queries.size() + docIDs.size();
   }
-
+  
+  void update(BufferedDeletes in, int limit) {
+    numTerms += in.numTerms;
+    bytesUsed += in.bytesUsed;
+    for (Term term : in.terms.keySet()) {
+      terms.put(term, new BufferedDeletes.Num(limit));
+    }
+    for (Query query : in.queries.keySet()) {
+      queries.put(query, limit);
+    }
+  }
+  
   void update(BufferedDeletes in) {
     numTerms += in.numTerms;
     bytesUsed += in.bytesUsed;
     terms.putAll(in.terms);
     queries.putAll(in.queries);
-    docIDs.addAll(in.docIDs);
-    in.clear();
   }
     
   void clear() {
@@ -98,7 +119,11 @@
     numTerms = 0;
     bytesUsed = 0;
   }
-
+  
+  void clearDocIDs() {
+    docIDs.clear();
+  }
+  
   void addBytesUsed(long b) {
     bytesUsed += b;
   }
@@ -106,64 +131,4 @@
   boolean any() {
     return terms.size() > 0 || docIDs.size() > 0 || queries.size() > 0;
   }
-
-  // Remaps all buffered deletes based on a completed
-  // merge
-  synchronized void remap(MergeDocIDRemapper mapper,
-                          SegmentInfos infos,
-                          int[][] docMaps,
-                          int[] delCounts,
-                          MergePolicy.OneMerge merge,
-                          int mergeDocCount) {
-
-    final Map<Term,Num> newDeleteTerms;
-
-    // Remap delete-by-term
-    if (terms.size() > 0) {
-      if (doTermSort) {
-        newDeleteTerms = new TreeMap<Term,Num>();
-      } else {
-        newDeleteTerms = new HashMap<Term,Num>();
-      }
-      for(Entry<Term,Num> entry : terms.entrySet()) {
-        Num num = entry.getValue();
-        newDeleteTerms.put(entry.getKey(),
-                           new Num(mapper.remap(num.getNum())));
-      }
-    } else 
-      newDeleteTerms = null;
-    
-
-    // Remap delete-by-docID
-    final List<Integer> newDeleteDocIDs;
-
-    if (docIDs.size() > 0) {
-      newDeleteDocIDs = new ArrayList<Integer>(docIDs.size());
-      for (Integer num : docIDs) {
-        newDeleteDocIDs.add(Integer.valueOf(mapper.remap(num.intValue())));
-      }
-    } else 
-      newDeleteDocIDs = null;
-    
-
-    // Remap delete-by-query
-    final HashMap<Query,Integer> newDeleteQueries;
-    
-    if (queries.size() > 0) {
-      newDeleteQueries = new HashMap<Query, Integer>(queries.size());
-      for(Entry<Query,Integer> entry: queries.entrySet()) {
-        Integer num = entry.getValue();
-        newDeleteQueries.put(entry.getKey(),
-                             Integer.valueOf(mapper.remap(num.intValue())));
-      }
-    } else
-      newDeleteQueries = null;
-
-    if (newDeleteTerms != null)
-      terms = newDeleteTerms;
-    if (newDeleteDocIDs != null)
-      docIDs = newDeleteDocIDs;
-    if (newDeleteQueries != null)
-      queries = newDeleteQueries;
-  }
 }
\ No newline at end of file
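The key addition here is update(BufferedDeletes in, int limit): unlike the one-argument update(BufferedDeletes in), which copies entries with their original per-term docID limits, the two-argument form re-keys every incoming term and query to the supplied limit. The limit is the docID ceiling the delete may touch, so pushing with Integer.MAX_VALUE makes the delete unconditional for whichever segment the BufferedDeletes is attached to. Note also that update(BufferedDeletes in) no longer copies docIDs or clears its argument; docIDs are strictly per-segment now and are dropped via clearDocIDs() once applied. A minimal sketch of the difference, with variable names invented for illustration:

    BufferedDeletes pushed = new BufferedDeletes(true);   // sorted terms
    pushed.update(flushed);                    // keeps each term's own docID limit
    pushed.update(flushed, Integer.MAX_VALUE); // every term/query now applies to all docs
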
Index: src/java/org/apache/lucene/index/SegmentDeletes.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentDeletes.java	(revision 0)
+++ src/java/org/apache/lucene/index/SegmentDeletes.java	(revision 0)
@@ -0,0 +1,429 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+
+class SegmentDeletes {
+  static boolean DEBUG = false;
+  /* Rough logic: HashMap has an array[Entry] w/ varying
+     load factor (say 2 * POINTER).  Entry is object w/ Term
+     key, BufferedDeletes.Num val, int hash, Entry next
+     (OBJ_HEADER + 3*POINTER + INT).  Term is object w/
+     String field and String text (OBJ_HEADER + 2*POINTER).
+     We don't count Term's field since it's interned.
+     Term's text is String (OBJ_HEADER + 4*INT + POINTER +
+     OBJ_HEADER + string.length*CHAR).  BufferedDeletes.num is
+     OBJ_HEADER + INT. */
+  
+  final static int BYTES_PER_DEL_TERM = 8*DocumentsWriter.POINTER_NUM_BYTE + 5*DocumentsWriter.OBJECT_HEADER_BYTES + 6*DocumentsWriter.INT_NUM_BYTE;
+
+  /* Rough logic: del docIDs are List<Integer>.  Say list
+     allocates ~2X size (2*POINTER).  Integer is OBJ_HEADER
+     + int */
+  final static int BYTES_PER_DEL_DOCID = 2*DocumentsWriter.POINTER_NUM_BYTE + DocumentsWriter.OBJECT_HEADER_BYTES + DocumentsWriter.INT_NUM_BYTE;
+
+  /* Rough logic: HashMap has an array[Entry] w/ varying
+     load factor (say 2 * POINTER).  Entry is object w/
+     Query key, Integer val, int hash, Entry next
+     (OBJ_HEADER + 3*POINTER + INT).  Query we often
+     undercount (say 24 bytes).  Integer is OBJ_HEADER + INT. */
+  final static int BYTES_PER_DEL_QUERY = 5*DocumentsWriter.POINTER_NUM_BYTE + 2*DocumentsWriter.OBJECT_HEADER_BYTES + 2*DocumentsWriter.INT_NUM_BYTE + 24;
+  
+  private Map<SegmentInfo,BufferedDeletes> deletesMap = new HashMap<SegmentInfo,BufferedDeletes>();
+  private BufferedDeletes deletesInRAM = new BufferedDeletes(true);
+  
+  // used only by assert
+  private Term lastDeleteTerm;
+  
+  private IndexWriter writer;
+  private PrintStream infoStream;
+  
+  public SegmentDeletes(IndexWriter writer) {
+    this.writer = writer;
+    this.infoStream = writer.getInfoStream();
+  }
+  
+  long getRAMBufferSize() {
+    double ramBufferMB = writer.getConfig().getRAMBufferSizeMB();
+    if (ramBufferMB == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
+      return IndexWriterConfig.DISABLE_AUTO_FLUSH;
+    }
+    return (long) (ramBufferMB*1024*1024);
+  }
+  
+  int getMaxBufferedDeleteTerms() {
+    return writer.getConfig().getMaxBufferedDeleteTerms();
+  }
+  
+  synchronized boolean doApplyDeletes() {
+    // Very similar to deletesFull(), except we don't count the
+    // DocumentsWriter's RAM used, because we are checking whether
+    // deletes (alone) are consuming too many resources now
+    // and thus should be applied.  We apply deletes if RAM
+    // usage is > 1/2 of our allowed RAM buffer, to prevent
+    // too-frequent flushing of a long tail of tiny segments
+    // when merges (which always apply deletes) are
+    // infrequent.
+    long ramBufferSize = getRAMBufferSize();
+    int maxBufferedDeleteTerms = getMaxBufferedDeleteTerms();
+    return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
+            bytesUsed() >= ramBufferSize/2) ||
+      (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
+       numTerms() >= maxBufferedDeleteTerms);
+  }
+  
+  synchronized boolean deletesFull(DocumentsWriter docWriter) {
+    long ramBufferSize = getRAMBufferSize();
+    int maxBufferedDeleteTerms = getMaxBufferedDeleteTerms();
+    return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
+            (bytesUsed() + docWriter.bytesUsed()) >= ramBufferSize) ||
+      (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
+       numTerms() >= maxBufferedDeleteTerms);
+  }
+  
+  void setInfoStream(PrintStream infoStream) {
+    this.infoStream = infoStream;
+  }
+  
+  synchronized void pushDeletes(SegmentInfo info, int limit, boolean clear) {
+    BufferedDeletes deletes = getDeletes(info);
+    deletes.update(deletesInRAM, limit);
+    if (clear) {
+      deletesInRAM.clear();
+    }
+  }
+  
+  synchronized void pushDeletes(SegmentInfo info) {
+    BufferedDeletes deletes = getDeletes(info);
+    deletes.update(deletesInRAM);
+    deletesInRAM.clear();
+  }
+  
+  boolean hasDeletes() {
+    return numTerms() > 0 || numQueries() > 0;
+  }
+  
+  synchronized int numQueries() {
+    int total = deletesInRAM.queries.size();
+    for (BufferedDeletes deletes : deletesMap.values()) {
+      total += deletes.queries.size();
+    }
+    return total;
+  }
+  
+  synchronized int numTerms() {
+    int total = deletesInRAM.numTerms;
+    for (BufferedDeletes deletes : deletesMap.values()) {
+      total += deletes.numTerms;
+    }
+    return total;
+  }
+  
+  // computed on demand from the maps, like numTerms()
+  synchronized int bytesUsed() {
+    int total = deletesInRAM.bytesUsed;
+    for (BufferedDeletes deletes : deletesMap.values()) {
+      total += deletes.bytesUsed;
+    }
+    return total;
+  }
+  
+  /**
+   * Merge the pending deletes of the merged segments, 
+   * and remove them from the info -> deletes map.
+   */
+  synchronized void mergeAndRemove(MergePolicy.OneMerge merge) {
+    BufferedDeletes mergedDeletes = getDeletes(merge.info);
+    for (SegmentInfo info : merge.segments) {
+      BufferedDeletes infoDeletes = getDeletes(info);
+      
+      assert infoDeletes.docIDs.size() == 0;
+      
+      mergedDeletes.update(infoDeletes, Integer.MAX_VALUE);
+    }
+    remove(merge.segments);
+  }
+  
+  synchronized boolean hasDeletesInRAM() {
+    return deletesInRAM.size() > 0;
+  }
+  
+  synchronized void clearPending() {
+    clear(deletesInRAM);
+  }
+  
+  synchronized void clear(BufferedDeletes deletes) {
+    deletes.clear();
+  }
+  
+  // Buffer a specific docID for deletion.  Currently only
+  // used when we hit an exception while adding a document
+  synchronized void addDeleteDocID(int docID) {
+    deletesInRAM.docIDs.add(Integer.valueOf(docID));
+    deletesInRAM.addBytesUsed(BYTES_PER_DEL_DOCID);
+  }
+  
+  synchronized void addDeleteQueries(int docCount, Query... queries) {
+    for (Query query : queries) {
+      addDeleteQuery(query, docCount);
+    }
+  }
+  
+  synchronized void addDeleteQuery(Query query, int docID) { 
+    deletesInRAM.queries.put(query, Integer.valueOf(docID));
+    deletesInRAM.addBytesUsed(BYTES_PER_DEL_QUERY);
+  }
+  
+  synchronized void addDeleteTerms(int docCount, Term... terms) {
+    for (Term term : terms) {
+      addDeleteTerm(term, docCount);
+    }
+  }
+  
+  synchronized void addDeleteTerm(Term term, int docCount) {
+    BufferedDeletes.Num num = deletesInRAM.terms.get(term);
+    if (num == null)
+      deletesInRAM.terms.put(term, new BufferedDeletes.Num(docCount));
+    else
+      num.setNum(docCount);
+    deletesInRAM.numTerms++;
+
+    deletesInRAM.addBytesUsed(BYTES_PER_DEL_TERM + term.bytes.length);
+  }
+  
+  // used only by assert
+  private boolean checkDeleteTerm(Term term) {
+    if (term != null) {
+      assert lastDeleteTerm == null || term.compareTo(lastDeleteTerm) > 0: "lastTerm=" + lastDeleteTerm + " vs term=" + term;
+    }
+    lastDeleteTerm = term;
+    return true;
+  }
+  
+  private synchronized void printDeletes(SegmentInfos segmentInfos) {
+    if (DEBUG) {
+      for (SegmentInfo info : segmentInfos) {
+        BufferedDeletes deletes = getDeletes(info);
+        System.out.println("segment:"+info.name+" "+deletes);
+      }
+    }
+  }
+  
+  public synchronized boolean applyDeletes(SegmentInfos segmentInfos, SegmentInfos applyInfos) throws IOException {
+    final long t0 = System.currentTimeMillis();
+
+    //if (infoStream != null)
+    //  writer.message("apply " + deletesFlushed.numTerms + " buffered deleted terms and " +
+    //          deletesFlushed.docIDs.size() + " deleted docIDs and " +
+    //          deletesFlushed.queries.size() + " deleted queries on " +
+    //          + infos.size() + " segments.");
+    boolean any = false;
+    BufferedDeletes coalescedDeletes = new BufferedDeletes(true);
+    
+    SegmentInfo lastApplyInfo = applyInfos.lastElement();
+    int lastIdx = segmentInfos.indexOf(lastApplyInfo);
+    
+    SegmentInfo firstInfo = applyInfos.firstElement();
+    int firstIdx = segmentInfos.indexOf(firstInfo);
+    
+    if (DEBUG) System.out.println("all deletes");
+    printDeletes(segmentInfos);
+    if (DEBUG) System.out.println("coalesced deletes");
+    
+    // Iterate over the segment infos backwards, coalescing
+    // deletes along the way.  Once we're at or below the last
+    // of the segments to apply to, start applying the deletes;
+    // we traverse back up to the first of the apply infos, so
+    // deletes buffered against later segments reach earlier ones.
+    for (int x=segmentInfos.size()-1; x >= firstIdx; x--) {
+      SegmentInfo info = segmentInfos.info(x);
+      BufferedDeletes deletes = getDeletes(info);
+
+      if (x <= lastIdx) {
+        any |= applyDeletes(info, coalescedDeletes, deletes);
+      
+        // we've applied doc ids, and they're only applied per-segment
+        deletes.clearDocIDs();
+      }
+      // now coalesce at the max limit
+      coalescedDeletes.update(deletes, Integer.MAX_VALUE);
+    }
+    
+    if (infoStream != null) {
+      writer.message("apply deletes took " + (System.currentTimeMillis()-t0) + " msec");
+    }
+    return any;
+  }
+  
+  private synchronized boolean applyDeletes(SegmentInfo info, 
+      BufferedDeletes coalescedDeletes,
+      BufferedDeletes segmentDeletes) throws IOException {    
+    assert writer.readerPool.infoIsLive(info);
+    
+    assert coalescedDeletes.docIDs.size() == 0;
+    
+    boolean any = false;
+    
+    SegmentReader reader = writer.readerPool.get(info, false);
+    try {
+      any |= applyDeletes(coalescedDeletes, reader);
+      any |= applyDeletes(segmentDeletes, reader);
+    } finally {
+      writer.readerPool.release(reader);
+    }
+    return any;
+  }
+  
+  private synchronized boolean applyDeletes(BufferedDeletes deletes, SegmentReader reader) throws IOException {
+    boolean any = false;
+
+    assert checkDeleteTerm(null);
+    
+    if (DEBUG) {
+      System.out.println("segment: "+reader.getSegmentName()+" applyDeletes: "+deletes);
+    }
+    
+    if (deletes.terms.size() > 0) {
+      Fields fields = reader.fields();
+      if (fields == null) {
+        // This reader has no postings
+        return false;
+      }
+
+      TermsEnum termsEnum = null;
+        
+      String currentField = null;
+      DocsEnum docs = null;
+        
+      for (Entry<Term, BufferedDeletes.Num> entry: deletes.terms.entrySet()) {
+        Term term = entry.getKey();
+        // Since we visit terms sorted, we gain performance
+        // by re-using the same TermsEnum and seeking only
+        // forwards
+        if (term.field() != currentField) {
+          assert currentField == null || currentField.compareTo(term.field()) < 0;
+          currentField = term.field();
+          Terms terms = fields.terms(currentField);
+          if (terms != null) {
+            termsEnum = terms.iterator();
+          } else {
+            termsEnum = null;
+          }
+        }
+          
+        if (termsEnum == null) {
+          continue;
+        }
+        assert checkDeleteTerm(term);
+          
+        if (termsEnum.seek(term.bytes(), false) == TermsEnum.SeekStatus.FOUND) {
+          DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs);
+            
+          if (docsEnum != null) {
+            docs = docsEnum;
+            int limit = entry.getValue().getNum();
+            while (true) {
+              final int docID = docs.nextDoc();
+              if (docID == DocsEnum.NO_MORE_DOCS || docID >= limit) {
+                break;
+              }
+              reader.deleteDocument(docID);
+              any = true;
+            }
+          }
+        }
+      }
+    }
+
+    // Delete by docID
+    for (Integer docIdInt : deletes.docIDs) {
+      int docID = docIdInt.intValue();
+      reader.deleteDocument(docID);
+      any = true;
+    }
+
+    // Delete by query
+    if (deletes.queries.size() > 0) {
+      IndexSearcher searcher = new IndexSearcher(reader);
+      try {
+        for (Entry<Query, Integer> entry : deletes.queries.entrySet()) {
+          Query query = entry.getKey();
+          int limit = entry.getValue().intValue();
+          Weight weight = query.weight(searcher);
+          Scorer scorer = weight.scorer(reader, true, false);
+          if (scorer != null) {
+            while(true)  {
+              int doc = scorer.nextDoc();
+              if (doc >= limit)
+                break;
+              reader.deleteDocument(doc);
+              any = true;
+            }
+          }
+        }
+      } finally {
+        searcher.close();
+      }
+    }
+    return any;
+  }
+  
+  synchronized void getForwardDeletes(SegmentInfo startInfo, 
+      BufferedDeletes coalescedDeletes, SegmentInfos segmentInfos) {
+    int idx = segmentInfos.indexOf(startInfo);
+    
+    assert idx >= 0;
+    
+    BufferedDeletes infoDeletes = getDeletes(startInfo);
+    coalescedDeletes.update(infoDeletes);
+    
+    for (int x=(idx+1); x < segmentInfos.size(); x++) {
+      SegmentInfo info = segmentInfos.info(x);
+      BufferedDeletes deletes = getDeletes(info);
+      if (deletes.any()) {
+        coalescedDeletes.update(deletes, Integer.MAX_VALUE);
+      }
+    }
+  }
+  
+  public synchronized BufferedDeletes getDeletes(SegmentInfo info) {
+    BufferedDeletes deletes = deletesMap.get(info);
+    if (deletes == null) {
+      deletes = new BufferedDeletes(true);
+      deletesMap.put(info, deletes);
+    }
+    return deletes;
+  }
+  
+  public synchronized void remove(SegmentInfos infos) {
+    for (SegmentInfo info : infos) {
+      deletesMap.remove(info);
+    }
+  }
+}
\ No newline at end of file
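For a feel of the RAM accounting above, plugging in the 64-bit constants (POINTER_NUM_BYTE == 8) gives:

    // BYTES_PER_DEL_TERM  = 8*8 + 5*8 + 6*4      = 128 bytes (+ term.bytes.length per term)
    // BYTES_PER_DEL_DOCID = 2*8 + 8 + 4          =  28 bytes
    // BYTES_PER_DEL_QUERY = 5*8 + 2*8 + 2*4 + 24 =  88 bytes

Combined with the doApplyDeletes() trigger of half the RAM buffer, a 16 MB RAM buffer starts applying deletes once buffered deletes alone reach 8 MB: roughly 8*1024*1024/128, about 65k unique delete terms, ignoring term text.
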
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 1039978)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -115,7 +115,12 @@
  */
 
 final class DocumentsWriter {
-
+  // Coarse estimates used to measure RAM usage of buffered deletes
+  final static int OBJECT_HEADER_BYTES = 8;
+  final static int POINTER_NUM_BYTE = Constants.JRE_IS_64BIT ? 8 : 4;
+  final static int INT_NUM_BYTE = 4;
+  final static int CHAR_NUM_BYTE = 2;
+  
   final AtomicLong bytesUsed = new AtomicLong(0);
   IndexWriter writer;
   Directory directory;
@@ -276,14 +281,6 @@
 
   final DocConsumer consumer;
 
-  // Deletes done after the last flush; these are discarded
-  // on abort
-  private BufferedDeletes deletesInRAM = new BufferedDeletes(false);
-
-  // Deletes done before the last flush; these are still
-  // kept on abort
-  private BufferedDeletes deletesFlushed = new BufferedDeletes(true);
-
   // The max number of delete terms that can be buffered before
   // they must be flushed to disk.
   private int maxBufferedDeleteTerms = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS;
@@ -307,12 +304,6 @@
   synchronized void updateFlushedDocCount(int n) {
     flushedDocCount += n;
   }
-  synchronized int getFlushedDocCount() {
-    return flushedDocCount;
-  }
-  synchronized void setFlushedDocCount(int n) {
-    flushedDocCount = n;
-  }
 
   private boolean closed;
 
@@ -514,10 +505,9 @@
         } catch (Throwable t) {
           abortedFiles = null;
         }
-
-        deletesInRAM.clear();
-        deletesFlushed.clear();
-
+        
+        writer.segmentDeletes.clearPending();
+        
         openFiles.clear();
 
         for(int i=0;i<threadStates.length;i++)
@@ -594,10 +584,7 @@
   }
 
   synchronized boolean anyChanges() {
-    return numDocsInRAM != 0 ||
-      deletesInRAM.numTerms != 0 ||
-      deletesInRAM.docIDs.size() != 0 ||
-      deletesInRAM.queries.size() != 0;
+    return numDocsInRAM != 0;
   }
 
   synchronized private void initFlushState(boolean onlyDocStore) {
@@ -712,10 +699,6 @@
     flushPending = false;
   }
 
-  synchronized void pushDeletes() {
-    deletesFlushed.update(deletesInRAM);
-  }
-
   synchronized void close() {
     closed = true;
     notifyAll();
@@ -737,7 +720,7 @@
    * flush is pending.  If delTerm is non-null then we
    * buffer this deleted term after the thread state has
    * been acquired. */
-  synchronized DocumentsWriterThreadState getThreadState(Document doc, Term delTerm) throws IOException {
+  synchronized DocumentsWriterThreadState getThreadState(Document doc) throws IOException {
 
     final Thread currentThread = Thread.currentThread();
 
@@ -786,10 +769,10 @@
 
       assert writer.testPoint("DocumentsWriter.ThreadState.init start");
 
-      if (delTerm != null) {
-        addDeleteTerm(delTerm, state.docState.docID);
-        state.doFlushAfter = timeToFlushDeletes();
-      }
+      //if (delTerm != null) {
+      //  writer.segmentDeletes.addDeleteTerm(delTerm, state.docState.docID);
+      //  state.doFlushAfter = timeToFlushDeletes();
+      //}
 
       assert writer.testPoint("DocumentsWriter.ThreadState.init after delTerm");
 
@@ -821,24 +804,16 @@
 
     return state;
   }
-
-  /** Returns true if the caller (IndexWriter) should now
-   * flush. */
-  boolean addDocument(Document doc, Analyzer analyzer)
+  
+  //boolean addDocument(Document doc, Analyzer analyzer) throws CorruptIndexException, IOException {
+  //  return updateDocument(doc, analyzer);
+  //}
+  
+  DocumentsWriterThreadState updateDocument(Document doc, Analyzer analyzer)
     throws CorruptIndexException, IOException {
-    return updateDocument(doc, analyzer, null);
-  }
-
-  boolean updateDocument(Term t, Document doc, Analyzer analyzer)
-    throws CorruptIndexException, IOException {
-    return updateDocument(doc, analyzer, t);
-  }
-
-  boolean updateDocument(Document doc, Analyzer analyzer, Term delTerm)
-    throws CorruptIndexException, IOException {
     
     // This call is synchronized but fast
-    final DocumentsWriterThreadState state = getThreadState(doc, delTerm);
+    final DocumentsWriterThreadState state = getThreadState(doc);
 
     final DocState docState = state.docState;
     docState.doc = doc;
@@ -878,7 +853,7 @@
                 state.isIdle = true;
                 notifyAll();
                 abort();
-                return false;
+                return state;
               }
             }
 
@@ -897,36 +872,16 @@
             // since likely it was partially added.  This
             // keeps indexing as "all or none" (atomic) when
             // adding a document:
-            addDeleteDocID(state.docState.docID);
+            writer.segmentDeletes.addDeleteDocID(state.docState.docID);
           }
         }
       }
     }
 
-    return state.doFlushAfter || timeToFlushDeletes();
+    //return state.doFlushAfter; //|| timeToFlushDeletes();
+    return state;
   }
 
-  // for testing
-  synchronized int getNumBufferedDeleteTerms() {
-    return deletesInRAM.numTerms;
-  }
-
-  // for testing
-  synchronized Map<Term,BufferedDeletes.Num> getBufferedDeleteTerms() {
-    return deletesInRAM.terms;
-  }
-
-  /** Called whenever a merge has completed and the merged segments had deletions */
-  synchronized void remapDeletes(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount) {
-    if (docMaps == null)
-      // The merged segments had no deletes so docIDs did not change and we have nothing to do
-      return;
-    MergeDocIDRemapper mapper = new MergeDocIDRemapper(infos, docMaps, delCounts, merge, mergeDocCount);
-    deletesInRAM.remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
-    deletesFlushed.remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
-    flushedDocCount -= mapper.docShift;
-  }
-
   synchronized private void waitReady(DocumentsWriterThreadState state) {
 
     while (!closed && ((state != null && !state.isIdle) || pauseThreads != 0 || flushPending || aborting)) {
@@ -941,69 +896,20 @@
       throw new AlreadyClosedException("this IndexWriter is closed");
   }
 
-  boolean bufferDeleteTerms(Term[] terms) throws IOException {
-    synchronized(this) {
-      waitReady(null);
-      for (int i = 0; i < terms.length; i++)
-        addDeleteTerm(terms[i], numDocsInRAM);
-    }
-    return timeToFlushDeletes();
-  }
+  //synchronized boolean deletesFull() {
+  //  return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
+  //          (writer.segmentDeletes.bytesUsed() + bytesUsed()) >= ramBufferSize) ||
+  //    (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
+  //     ((writer.segmentDeletes.numTerms()) >= maxBufferedDeleteTerms));
+  //}
 
-  boolean bufferDeleteTerm(Term term) throws IOException {
-    synchronized(this) {
-      waitReady(null);
-      addDeleteTerm(term, numDocsInRAM);
-    }
-    return timeToFlushDeletes();
-  }
+  //private boolean timeToFlushDeletes() {
+  //  balanceRAM();
+  //  synchronized(this) {
+  //    return (bufferIsFull || deletesFull()) && setFlushPending();
+  //  }
+  //}
 
-  boolean bufferDeleteQueries(Query[] queries) throws IOException {
-    synchronized(this) {
-      waitReady(null);
-      for (int i = 0; i < queries.length; i++)
-        addDeleteQuery(queries[i], numDocsInRAM);
-    }
-    return timeToFlushDeletes();
-  }
-
-  boolean bufferDeleteQuery(Query query) throws IOException {
-    synchronized(this) {
-      waitReady(null);
-      addDeleteQuery(query, numDocsInRAM);
-    }
-    return timeToFlushDeletes();
-  }
-
-  synchronized boolean deletesFull() {
-    return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-            (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed + bytesUsed()) >= ramBufferSize) ||
-      (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-       ((deletesInRAM.size() + deletesFlushed.size()) >= maxBufferedDeleteTerms));
-  }
-
-  synchronized boolean doApplyDeletes() {
-    // Very similar to deletesFull(), except we don't count
-    // numBytesUsed, because we are checking whether
-    // deletes (alone) are consuming too many resources now
-    // and thus should be applied.  We apply deletes if RAM
-    // usage is > 1/2 of our allowed RAM buffer, to prevent
-    // too-frequent flushing of a long tail of tiny segments
-    // when merges (which always apply deletes) are
-    // infrequent.
-    return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-            (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed) >= ramBufferSize/2) ||
-      (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-       ((deletesInRAM.size() + deletesFlushed.size()) >= maxBufferedDeleteTerms));
-  }
-
-  private boolean timeToFlushDeletes() {
-    balanceRAM();
-    synchronized(this) {
-      return (bufferIsFull || deletesFull()) && setFlushPending();
-    }
-  }
-
   void setMaxBufferedDeleteTerms(int maxBufferedDeleteTerms) {
     this.maxBufferedDeleteTerms = maxBufferedDeleteTerms;
   }
@@ -1012,188 +918,6 @@
     return maxBufferedDeleteTerms;
   }
 
-  synchronized boolean hasDeletes() {
-    return deletesFlushed.any();
-  }
-
-  synchronized boolean applyDeletes(SegmentInfos infos) throws IOException {
-
-    if (!hasDeletes())
-      return false;
-
-    final long t0 = System.currentTimeMillis();
-
-    if (infoStream != null)
-      message("apply " + deletesFlushed.numTerms + " buffered deleted terms and " +
-              deletesFlushed.docIDs.size() + " deleted docIDs and " +
-              deletesFlushed.queries.size() + " deleted queries on " +
-              + infos.size() + " segments.");
-
-    final int infosEnd = infos.size();
-
-    int docStart = 0;
-    boolean any = false;
-    for (int i = 0; i < infosEnd; i++) {
-
-      // Make sure we never attempt to apply deletes to
-      // segment in external dir
-      assert infos.info(i).dir == directory;
-
-      SegmentReader reader = writer.readerPool.get(infos.info(i), false);
-      try {
-        any |= applyDeletes(reader, docStart);
-        docStart += reader.maxDoc();
-      } finally {
-        writer.readerPool.release(reader);
-      }
-    }
-
-    deletesFlushed.clear();
-    if (infoStream != null) {
-      message("apply deletes took " + (System.currentTimeMillis()-t0) + " msec");
-    }
-
-    return any;
-  }
-
-  // used only by assert
-  private Term lastDeleteTerm;
-
-  // used only by assert
-  private boolean checkDeleteTerm(Term term) {
-    if (term != null) {
-      assert lastDeleteTerm == null || term.compareTo(lastDeleteTerm) > 0: "lastTerm=" + lastDeleteTerm + " vs term=" + term;
-    }
-    lastDeleteTerm = term;
-    return true;
-  }
-
-  // Apply buffered delete terms, queries and docIDs to the
-  // provided reader
-  private final synchronized boolean applyDeletes(IndexReader reader, int docIDStart)
-    throws CorruptIndexException, IOException {
-
-    final int docEnd = docIDStart + reader.maxDoc();
-    boolean any = false;
-
-    assert checkDeleteTerm(null);
-
-    // Delete by term
-    if (deletesFlushed.terms.size() > 0) {
-      Fields fields = reader.fields();
-      if (fields == null) {
-        // This reader has no postings
-        return false;
-      }
-
-      TermsEnum termsEnum = null;
-        
-      String currentField = null;
-      DocsEnum docs = null;
-        
-      for (Entry<Term, BufferedDeletes.Num> entry: deletesFlushed.terms.entrySet()) {
-        Term term = entry.getKey();
-        // Since we visit terms sorted, we gain performance
-        // by re-using the same TermsEnum and seeking only
-        // forwards
-        if (term.field() != currentField) {
-          assert currentField == null || currentField.compareTo(term.field()) < 0;
-          currentField = term.field();
-          Terms terms = fields.terms(currentField);
-          if (terms != null) {
-            termsEnum = terms.iterator();
-          } else {
-            termsEnum = null;
-          }
-        }
-          
-        if (termsEnum == null) {
-          continue;
-        }
-        assert checkDeleteTerm(term);
-          
-        if (termsEnum.seek(term.bytes(), false) == TermsEnum.SeekStatus.FOUND) {
-          DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs);
-            
-          if (docsEnum != null) {
-            docs = docsEnum;
-            int limit = entry.getValue().getNum();
-            while (true) {
-              final int docID = docs.nextDoc();
-              if (docID == DocsEnum.NO_MORE_DOCS || docIDStart+docID >= limit) {
-                break;
-              }
-              reader.deleteDocument(docID);
-              any = true;
-            }
-          }
-        }
-      }
-    }
-
-    // Delete by docID
-    for (Integer docIdInt : deletesFlushed.docIDs) {
-      int docID = docIdInt.intValue();
-      if (docID >= docIDStart && docID < docEnd) {
-        reader.deleteDocument(docID-docIDStart);
-        any = true;
-      }
-    }
-
-    // Delete by query
-    if (deletesFlushed.queries.size() > 0) {
-      IndexSearcher searcher = new IndexSearcher(reader);
-      try {
-        for (Entry<Query, Integer> entry : deletesFlushed.queries.entrySet()) {
-          Query query = entry.getKey();
-          int limit = entry.getValue().intValue();
-          Weight weight = query.weight(searcher);
-          Scorer scorer = weight.scorer(reader, true, false);
-          if (scorer != null) {
-            while(true)  {
-              int doc = scorer.nextDoc();
-              if (((long) docIDStart) + doc >= limit)
-                break;
-              reader.deleteDocument(doc);
-              any = true;
-            }
-          }
-        }
-      } finally {
-        searcher.close();
-      }
-    }
-    return any;
-  }
-
-  // Buffer a term in bufferedDeleteTerms, which records the
-  // current number of documents buffered in ram so that the
-  // delete term will be applied to those documents as well
-  // as the disk segments.
-  synchronized private void addDeleteTerm(Term term, int docCount) {
-    BufferedDeletes.Num num = deletesInRAM.terms.get(term);
-    final int docIDUpto = flushedDocCount + docCount;
-    if (num == null)
-      deletesInRAM.terms.put(term, new BufferedDeletes.Num(docIDUpto));
-    else
-      num.setNum(docIDUpto);
-    deletesInRAM.numTerms++;
-
-    deletesInRAM.addBytesUsed(BYTES_PER_DEL_TERM + term.bytes.length);
-  }
-
-  // Buffer a specific docID for deletion.  Currently only
-  // used when we hit a exception when adding a document
-  synchronized private void addDeleteDocID(int docID) {
-    deletesInRAM.docIDs.add(Integer.valueOf(flushedDocCount+docID));
-    deletesInRAM.addBytesUsed(BYTES_PER_DEL_DOCID);
-  }
-
-  synchronized private void addDeleteQuery(Query query, int docID) {
-    deletesInRAM.queries.put(query, Integer.valueOf(flushedDocCount + docID));
-    deletesInRAM.addBytesUsed(BYTES_PER_DEL_QUERY);
-  }
-
   /** Does the synchronized work to finish/flush the
    *  inverted document. */
   private void finishDocument(DocumentsWriterThreadState perThread, DocWriter docWriter) throws IOException {
@@ -1270,41 +994,11 @@
   final SkipDocWriter skipDocWriter = new SkipDocWriter();
 
   long getRAMUsed() {
-    return bytesUsed() + deletesInRAM.bytesUsed + deletesFlushed.bytesUsed;
+    return bytesUsed() + writer.segmentDeletes.bytesUsed();
   }
 
   NumberFormat nf = NumberFormat.getInstance();
 
-  // Coarse estimates used to measure RAM usage of buffered deletes
-  final static int OBJECT_HEADER_BYTES = 8;
-  final static int POINTER_NUM_BYTE = Constants.JRE_IS_64BIT ? 8 : 4;
-  final static int INT_NUM_BYTE = 4;
-  final static int CHAR_NUM_BYTE = 2;
-
-  /* Rough logic: HashMap has an array[Entry] w/ varying
-     load factor (say 2 * POINTER).  Entry is object w/ Term
-     key, BufferedDeletes.Num val, int hash, Entry next
-     (OBJ_HEADER + 3*POINTER + INT).  Term is object w/
-     String field and String text (OBJ_HEADER + 2*POINTER).
-     We don't count Term's field since it's interned.
-     Term's text is String (OBJ_HEADER + 4*INT + POINTER +
-     OBJ_HEADER + string.length*CHAR).  BufferedDeletes.num is
-     OBJ_HEADER + INT. */
- 
-  final static int BYTES_PER_DEL_TERM = 8*POINTER_NUM_BYTE + 5*OBJECT_HEADER_BYTES + 6*INT_NUM_BYTE;
-
-  /* Rough logic: del docIDs are List<Integer>.  Say list
-     allocates ~2X size (2*POINTER).  Integer is OBJ_HEADER
-     + int */
-  final static int BYTES_PER_DEL_DOCID = 2*POINTER_NUM_BYTE + OBJECT_HEADER_BYTES + INT_NUM_BYTE;
-
-  /* Rough logic: HashMap has an array[Entry] w/ varying
-     load factor (say 2 * POINTER).  Entry is object w/
-     Query key, Integer val, int hash, Entry next
-     (OBJ_HEADER + 3*POINTER + INT).  Query we often
-     undercount (say 24 bytes).  Integer is OBJ_HEADER + INT. */
-  final static int BYTES_PER_DEL_QUERY = 5*POINTER_NUM_BYTE + 2*OBJECT_HEADER_BYTES + 2*INT_NUM_BYTE + 24;
-
   /* Initial chunks size of the shared byte[] blocks used to
      store postings data */
   final static int BYTE_BLOCK_NOT_MASK = ~BYTE_BLOCK_MASK;
@@ -1333,7 +1027,7 @@
     return b;
   }
 
-  private long bytesUsed() {
+  long bytesUsed() {
     return bytesUsed.get();
   }
 
@@ -1375,7 +1069,7 @@
         return;
       }
     
-      deletesRAMUsed = deletesInRAM.bytesUsed+deletesFlushed.bytesUsed;
+      deletesRAMUsed = writer.segmentDeletes.bytesUsed();
       doBalance = bytesUsed() +deletesRAMUsed >= ramBufferSize;
     }
 
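The net effect on DocumentsWriter: it no longer knows about deletes at all. anyChanges() reflects only buffered docs, the per-delete byte estimates moved to the top of the class (still consumed by SegmentDeletes), and getRAMUsed()/balanceRAM() consult writer.segmentDeletes for delete RAM. With delTerm gone from getThreadState/updateDocument, an update is now two steps driven by the caller; condensed from the IndexWriter hunk below:

    DocumentsWriterThreadState state = docWriter.updateDocument(doc, analyzer);
    // the delete term only applies to docs with smaller docIDs
    segmentDeletes.addDeleteTerm(term, state.docState.docID);
    boolean doFlush = state.doFlushAfter;
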
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 1039978)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -21,8 +21,11 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.PayloadProcessorProvider.DirPayloadProcessor;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
@@ -47,6 +50,7 @@
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Date;
+import java.util.Map.Entry;
 
 /**
   An <code>IndexWriter</code> creates and maintains an index.
@@ -280,7 +284,7 @@
   volatile SegmentInfos pendingCommit;            // set when a commit is pending (after prepareCommit() & before commit())
   volatile long pendingCommitChangeCount;
 
-  private final SegmentInfos segmentInfos;       // the segments
+  final SegmentInfos segmentInfos;       // the segments
 
   private DocumentsWriter docWriter;
   private IndexFileDeleter deleter;
@@ -313,6 +317,7 @@
   private int flushDeletesCount;
 
   final ReaderPool readerPool = new ReaderPool();
+  final SegmentDeletes segmentDeletes = new SegmentDeletes(this);
   
   // This is a "write once" variable (like the organic dye
   // on a DVD-R that may or may not be heated by a laser and
@@ -1570,6 +1577,7 @@
     setMessageID(infoStream);
     docWriter.setInfoStream(infoStream);
     deleter.setInfoStream(infoStream);
+    segmentDeletes.setInfoStream(infoStream);
     if (infoStream != null)
       messageState();
   }
@@ -1943,7 +1951,7 @@
 
   public synchronized boolean hasDeletions() throws IOException {
     ensureOpen();
-    if (docWriter.hasDeletes())
+    if (segmentDeletes.hasDeletes())
       return true;
     for (int i = 0; i < segmentInfos.size(); i++)
       if (segmentInfos.info(i).hasDeletions())
@@ -2038,7 +2046,7 @@
     boolean success = false;
     try {
       try {
-        doFlush = docWriter.addDocument(doc, analyzer);
+        doFlush = docWriter.updateDocument(doc, analyzer).doFlushAfter;
         success = true;
       } finally {
         if (!success) {
@@ -2078,7 +2086,11 @@
   public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
-      boolean doFlush = docWriter.bufferDeleteTerm(term);
+      boolean doFlush = false;
+      synchronized (this) {
+        int docCount = docWriter.getNumDocsInRAM();
+        segmentDeletes.addDeleteTerm(term, docCount);
+      } 
       if (doFlush)
         flush(true, false, false);
     } catch (OutOfMemoryError oom) {
@@ -2102,7 +2114,11 @@
   public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
-      boolean doFlush = docWriter.bufferDeleteTerms(terms);
+      boolean doFlush = false;
+      synchronized (this) {
+        int docCount = docWriter.getNumDocsInRAM();
+        segmentDeletes.addDeleteTerms(docCount, terms);
+      }
       if (doFlush)
         flush(true, false, false);
     } catch (OutOfMemoryError oom) {
@@ -2123,7 +2139,11 @@
    */
   public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
     ensureOpen();
-    boolean doFlush = docWriter.bufferDeleteQuery(query);
+    boolean doFlush = false;
+    synchronized (this) {
+      int docCount = docWriter.getNumDocsInRAM();
+      segmentDeletes.addDeleteQuery(query, docCount);
+    }
     if (doFlush)
       flush(true, false, false);
   }
@@ -2143,7 +2163,11 @@
    */
   public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
     ensureOpen();
-    boolean doFlush = docWriter.bufferDeleteQueries(queries);
+    boolean doFlush = false;
+    synchronized (this) {
+      int docCount = docWriter.getNumDocsInRAM();
+      segmentDeletes.addDeleteQueries(docCount, queries);
+    }
     if (doFlush)
       flush(true, false, false);
   }
@@ -2195,7 +2219,9 @@
       boolean doFlush = false;
       boolean success = false;
       try {
-        doFlush = docWriter.updateDocument(term, doc, analyzer);
+        DocumentsWriterThreadState state = docWriter.updateDocument(doc, analyzer);
+        segmentDeletes.addDeleteTerm(term, state.docState.docID);
+        doFlush = state.doFlushAfter;
         success = true;
       } finally {
         if (!success) {
@@ -2745,7 +2771,6 @@
 
       // Remove any buffered docs
       docWriter.abort();
-      docWriter.setFlushedDocCount(0);
 
       // Remove all segments
       segmentInfos.clear();
@@ -3303,7 +3328,7 @@
     // If we are flushing because too many deletes
     // accumulated, then we should apply the deletes to free
     // RAM:
-    flushDeletes |= docWriter.doApplyDeletes();
+    flushDeletes |= segmentDeletes.doApplyDeletes();
 
     // Make sure no threads are actively adding a document.
     // Returns true if docWriter is currently aborting, in
@@ -3344,7 +3369,7 @@
                 " flushDeletes=" + flushDeletes +
                 " flushDocStores=" + flushDocStores +
                 " numDocs=" + numDocs +
-                " numBufDelTerms=" + docWriter.getNumBufferedDeleteTerms());
+                " numBufDelTerms=" + segmentDeletes.numTerms());
         message("  index before flush " + segString());
       }
 
@@ -3411,11 +3436,17 @@
         setDiagnostics(newSegment, "flush");
       }
 
-      docWriter.pushDeletes();
+      
 
       if (flushDocs) {
         segmentInfos.add(newSegment);
+        segmentDeletes.pushDeletes(newSegment);
         checkpoint();
+      } else {
+        if (segmentInfos.size() > 0) {
+          SegmentInfo lastSegmentInfo = segmentInfos.lastElement();
+          segmentDeletes.pushDeletes(lastSegmentInfo, Integer.MAX_VALUE, false);
+        }
       }
 
       if (flushDocs && mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
@@ -3437,9 +3468,10 @@
       }
 
       if (flushDeletes) {
-        applyDeletes();
+        if (segmentInfos.size() > 0) {
+          applyDeletesAll();
+        }
       }
-      
       if (flushDocs)
         checkpoint();
 
@@ -3602,7 +3634,6 @@
     final int start = ensureContiguousMerge(merge);
 
     commitMergedDeletes(merge, mergedReader);
-    docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);
       
     // If the doc store we are using has been closed and
     // is in now compound format (but wasn't when we
@@ -3615,7 +3646,7 @@
     segmentInfos.subList(start, start + merge.segments.size()).clear();
     assert !segmentInfos.contains(merge.info);
     segmentInfos.add(start, merge.info);
-
+    
     closeMergeReaders(merge, false);
 
     // Must note the change to segmentInfos so any commits
@@ -3626,6 +3657,10 @@
     // them so that they don't bother writing them to
     // disk, updating SegmentInfo, etc.:
     readerPool.clear(merge.segments);
+    
+    // remove pending deletes of the segments 
+    // that were merged
+    segmentDeletes.mergeAndRemove(merge);
 
     if (merge.optimize) {
       // cascade the optimize:
@@ -3811,9 +3846,12 @@
 
     if (merge.isAborted())
       return;
+    
+    if (SegmentDeletes.DEBUG) {
+      System.out.println("_mergeInit applyDeletesAll");
+    }
+    applyDeletesAll();
 
-    applyDeletes();
-
     final SegmentInfos sourceSegments = merge.segments;
     final int end = sourceSegments.size();
 
@@ -4275,7 +4313,7 @@
   }
 
   // Apply buffered deletes to all segments.
-  private final synchronized boolean applyDeletes() throws CorruptIndexException, IOException {
+  private final synchronized boolean applyDeletesAll() throws CorruptIndexException, IOException {
     assert testPoint("startApplyDeletes");
     if (infoStream != null) {
       message("applyDeletes");
@@ -4284,7 +4322,10 @@
     boolean success = false;
     boolean changed;
     try {
-      changed = docWriter.applyDeletes(segmentInfos);
+      changed = segmentDeletes.applyDeletes(segmentInfos, segmentInfos);
+      
+      // all deletes should have been applied
+      segmentDeletes.remove(segmentInfos);
       success = true;
     } finally {
       if (!success && infoStream != null) {
@@ -4299,12 +4340,12 @@
 
   // For test purposes.
   final synchronized int getBufferedDeleteTermsSize() {
-    return docWriter.getBufferedDeleteTerms().size();
+    return segmentDeletes.numTerms();
   }
 
   // For test purposes.
   final synchronized int getNumBufferedDeleteTerms() {
-    return docWriter.getNumBufferedDeleteTerms();
+    return segmentDeletes.numTerms();
   }
 
   // utility routines for tests
@@ -4641,7 +4682,7 @@
       // are stale
       return false;
     } else {
-      return !docWriter.anyChanges();
+      return !(docWriter.anyChanges() || segmentDeletes.hasDeletes());
     }
   }
 

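Putting the IndexWriter changes together, the life of a buffered delete under this patch looks like this (a sketch of the flow, not code from the patch):

    writer.addDocument(doc);                      // buffered in DocumentsWriter
    writer.deleteDocuments(new Term("id", "7"));  // recorded in segmentDeletes.deletesInRAM,
                                                  // limited to the docs buffered so far
    writer.commit();                              // flush adds the new SegmentInfo and
                                                  // pushDeletes() attaches deletesInRAM to it;
                                                  // if only deletes were buffered, they are
                                                  // pushed onto the last existing segment
    // a later merge (or applyDeletesAll) applies each segment's pending
    // deletes, and mergeAndRemove(merge) drops them from the map
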