Index: src/test/org/apache/lucene/index/TestStressIndexing2.java
===================================================================
--- src/test/org/apache/lucene/index/TestStressIndexing2.java	(revision 1037578)
+++ src/test/org/apache/lucene/index/TestStressIndexing2.java	(working copy)
@@ -91,6 +91,18 @@
     dir2.close();
   }
 
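+  // Stress wrapper: repeats testMultiConfig many times so that rare,
+  // timing-dependent failures surface; reports the failing iteration
+  // before rethrowing.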
+  public void testMultiConfigMany() throws Throwable {
+    for (int x=0; x < 500; x++) {
+      try {
+        testMultiConfig();
+      } catch (Throwable th) {
+        System.err.println("testMultiConfig failed at iteration x=" + x);
+        throw th;
+      }
+    }
+  }
+  
   public void testMultiConfig() throws Throwable {
     // test lots of smaller different params together
 
Index: src/java/org/apache/lucene/index/BufferedDeletes.java
===================================================================
--- src/java/org/apache/lucene/index/BufferedDeletes.java	(revision 1037578)
+++ src/java/org/apache/lucene/index/BufferedDeletes.java	(working copy)
@@ -82,13 +82,23 @@
     return numTerms + queries.size() + docIDs.size();
   }
 
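+  // Folds the deletes from "in" into this buffer, raising each term's
+  // and query's doc id limit to Integer.MAX_VALUE so they match every
+  // document in the target segment; the original limits no longer line
+  // up with doc ids once segments have been merged.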
+  void updateMaxLimit(BufferedDeletes in) {
+    numTerms += in.numTerms;
+    bytesUsed += in.bytesUsed;
+    for (Term term : in.terms.keySet()) {
+      terms.put(term, new Num(Integer.MAX_VALUE));
+    }
+    for (Query query : in.queries.keySet()) {
+      queries.put(query, Integer.MAX_VALUE);
+    }
+  }
+  
   void update(BufferedDeletes in) {
     numTerms += in.numTerms;
     bytesUsed += in.bytesUsed;
     terms.putAll(in.terms);
     queries.putAll(in.queries);
     docIDs.addAll(in.docIDs);
-    in.clear();
   }
     
   void clear() {
@@ -106,64 +116,4 @@
   boolean any() {
     return terms.size() > 0 || docIDs.size() > 0 || queries.size() > 0;
   }
-
-  // Remaps all buffered deletes based on a completed
-  // merge
-  synchronized void remap(MergeDocIDRemapper mapper,
-                          SegmentInfos infos,
-                          int[][] docMaps,
-                          int[] delCounts,
-                          MergePolicy.OneMerge merge,
-                          int mergeDocCount) {
-
-    final Map<Term,Num> newDeleteTerms;
-
-    // Remap delete-by-term
-    if (terms.size() > 0) {
-      if (doTermSort) {
-        newDeleteTerms = new TreeMap<Term,Num>();
-      } else {
-        newDeleteTerms = new HashMap<Term,Num>();
-      }
-      for(Entry<Term,Num> entry : terms.entrySet()) {
-        Num num = entry.getValue();
-        newDeleteTerms.put(entry.getKey(),
-                           new Num(mapper.remap(num.getNum())));
-      }
-    } else 
-      newDeleteTerms = null;
-    
-
-    // Remap delete-by-docID
-    final List<Integer> newDeleteDocIDs;
-
-    if (docIDs.size() > 0) {
-      newDeleteDocIDs = new ArrayList<Integer>(docIDs.size());
-      for (Integer num : docIDs) {
-        newDeleteDocIDs.add(Integer.valueOf(mapper.remap(num.intValue())));
-      }
-    } else 
-      newDeleteDocIDs = null;
-    
-
-    // Remap delete-by-query
-    final HashMap<Query,Integer> newDeleteQueries;
-    
-    if (queries.size() > 0) {
-      newDeleteQueries = new HashMap<Query, Integer>(queries.size());
-      for(Entry<Query,Integer> entry: queries.entrySet()) {
-        Integer num = entry.getValue();
-        newDeleteQueries.put(entry.getKey(),
-                             Integer.valueOf(mapper.remap(num.intValue())));
-      }
-    } else
-      newDeleteQueries = null;
-
-    if (newDeleteTerms != null)
-      terms = newDeleteTerms;
-    if (newDeleteDocIDs != null)
-      docIDs = newDeleteDocIDs;
-    if (newDeleteQueries != null)
-      queries = newDeleteQueries;
-  }
 }
\ No newline at end of file
Index: src/java/org/apache/lucene/index/SegmentInfo.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfo.java	(revision 1037578)
+++ src/java/org/apache/lucene/index/SegmentInfo.java	(working copy)
@@ -39,7 +39,7 @@
  * @lucene.experimental
  */
 public final class SegmentInfo {
-
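+  // Deletes buffered against this segment while it was the newest in
+  // the index; replaces the writer-wide deletesFlushed buffer.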
+  BufferedDeletes deletes = new BufferedDeletes(true);
   static final int NO = -1;          // e.g. no norms; no deletes;
   static final int YES = 1;          // e.g. have norms; have deletes;
   static final int WITHOUT_GEN = 0;  // a file name that has no GEN in it. 
@@ -230,6 +230,7 @@
   @Override
   public Object clone() {
     SegmentInfo si = new SegmentInfo(name, docCount, dir, isCompoundFile, docStoreOffset, docStoreSegment, docStoreIsCompoundFile, hasProx, segmentCodecs);
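+    // Note: the clone shares, rather than deep-copies, the buffered deletes.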
+    si.deletes = deletes;
     si.isCompoundFile = isCompoundFile;
     si.delGen = delGen;
     si.delCount = delCount;
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 1037578)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -280,10 +280,6 @@
   // on abort
   private BufferedDeletes deletesInRAM = new BufferedDeletes(false);
 
-  // Deletes done before the last flush; these are still
-  // kept on abort
-  private BufferedDeletes deletesFlushed = new BufferedDeletes(true);
-
   // The max number of delete terms that can be buffered before
   // they must be flushed to disk.
   private int maxBufferedDeleteTerms = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS;
@@ -304,6 +300,8 @@
 
   private int flushedDocCount;                      // How many docs already flushed to index
 
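+  // The writer's live SegmentInfos, kept in sync via pushSegmentInfos;
+  // used to route buffered deletes to per-segment buffers.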
+  SegmentInfos segmentInfos;
+  
   synchronized void updateFlushedDocCount(int n) {
     flushedDocCount += n;
   }
@@ -516,7 +514,6 @@
         }
 
         deletesInRAM.clear();
-        deletesFlushed.clear();
 
         openFiles.clear();
 
@@ -712,9 +709,18 @@
     flushPending = false;
   }
 
-  synchronized void pushDeletes() {
-    deletesFlushed.update(deletesInRAM);
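+  // Moves the RAM-buffered deletes onto the newly flushed segment
+  // instead of a writer-wide deletesFlushed buffer.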
+  synchronized void pushDeletes(SegmentInfo newInfo) {
+    newInfo.deletes.update(deletesInRAM);
+    deletesInRAM.clear();
   }
+  
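+  // If a flush produced no new segment, park the buffered deletes on
+  // the current last segment so they still apply to all earlier ones.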
+  synchronized void pushDeletesLastSegment() {
+    if (segmentInfos.size() > 0) {
+      SegmentInfo info = segmentInfos.lastElement();
+      info.deletes.update(deletesInRAM);
+      deletesInRAM.clear();
+    }
+  }
 
   synchronized void close() {
     closed = true;
@@ -916,17 +922,6 @@
     return deletesInRAM.terms;
   }
 
-  /** Called whenever a merge has completed and the merged segments had deletions */
-  synchronized void remapDeletes(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount) {
-    if (docMaps == null)
-      // The merged segments had no deletes so docIDs did not change and we have nothing to do
-      return;
-    MergeDocIDRemapper mapper = new MergeDocIDRemapper(infos, docMaps, delCounts, merge, mergeDocCount);
-    deletesInRAM.remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
-    deletesFlushed.remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
-    flushedDocCount -= mapper.docShift;
-  }
-
   synchronized private void waitReady(DocumentsWriterThreadState state) {
 
     while (!closed && ((state != null && !state.isIdle) || pauseThreads != 0 || flushPending || aborting)) {
@@ -977,9 +972,9 @@
 
   synchronized boolean deletesFull() {
     return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-            (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed + bytesUsed()) >= ramBufferSize) ||
+            (deletesInRAM.bytesUsed + bytesUsed()) >= ramBufferSize) ||
       (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-       ((deletesInRAM.size() + deletesFlushed.size()) >= maxBufferedDeleteTerms));
+       (deletesInRAM.size() >= maxBufferedDeleteTerms));
   }
 
   synchronized boolean doApplyDeletes() {
@@ -992,9 +987,9 @@
     // when merges (which always apply deletes) are
     // infrequent.
     return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-            (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed) >= ramBufferSize/2) ||
+            deletesInRAM.bytesUsed >= ramBufferSize/2) ||
       (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
-       ((deletesInRAM.size() + deletesFlushed.size()) >= maxBufferedDeleteTerms));
+       (deletesInRAM.size() >= maxBufferedDeleteTerms));
   }
 
   private boolean timeToFlushDeletes() {
@@ -1012,43 +1007,61 @@
     return maxBufferedDeleteTerms;
   }
 
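+  // Called by IndexWriter whenever its SegmentInfos changes, so the
+  // two always see the same segments.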
+  synchronized void pushSegmentInfos(SegmentInfos infos) {
+    this.segmentInfos = infos;
+  }
+  
   synchronized boolean hasDeletes() {
-    return deletesFlushed.any();
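+    // Deletes now live in RAM and on individual segments; check both.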
+    if (deletesInRAM.any()) {
+      return true;
+    }
+    for (SegmentInfo info : segmentInfos) {
+      if (info.deletes.any()) {
+        return true;
+      }
+    }
+    return false;
   }
 
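+  // After a merge commits, copies every segment's buffered deletes
+  // onto the merged segment with Integer.MAX_VALUE limits; the
+  // pre-merge doc id limits do not survive the merge.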
+  synchronized void mergeDeletes(MergePolicy.OneMerge merge) {
+    BufferedDeletes mergedDeletes = merge.info.deletes;
+    for (SegmentInfo info : segmentInfos) {
+      mergedDeletes.updateMaxLimit(info.deletes);
+    }
+  }
+  
   synchronized boolean applyDeletes(SegmentInfos infos) throws IOException {
-
     if (!hasDeletes())
       return false;
 
     final long t0 = System.currentTimeMillis();
 
     if (infoStream != null)
-      message("apply " + deletesFlushed.numTerms + " buffered deleted terms and " +
-              deletesFlushed.docIDs.size() + " deleted docIDs and " +
-              deletesFlushed.queries.size() + " deleted queries on " +
+      message("apply " + deletesInRAM.numTerms + " buffered deleted terms and " +
+          deletesInRAM.docIDs.size() + " deleted docIDs and " +
+          deletesInRAM.queries.size() + " deleted queries on " +
               + infos.size() + " segments.");
 
     final int infosEnd = infos.size();
 
-    int docStart = 0;
     boolean any = false;
     for (int i = 0; i < infosEnd; i++) {
 
       // Make sure we never attempt to apply deletes to
       // segment in external dir
       assert infos.info(i).dir == directory;
-
-      SegmentReader reader = writer.readerPool.get(infos.info(i), false);
+      
+      SegmentInfo info = infos.info(i);
+      
+      SegmentReader reader = writer.readerPool.get(info, false);
       try {
-        any |= applyDeletes(reader, docStart);
-        docStart += reader.maxDoc();
+        BufferedDeletes deletes = getForwardDeletes(info);
+        any |= applyDeletes(deletes, reader);
       } finally {
         writer.readerPool.release(reader);
       }
     }
 
-    deletesFlushed.clear();
     if (infoStream != null) {
       message("apply deletes took " + (System.currentTimeMillis()-t0) + " msec");
     }
@@ -1056,6 +1069,24 @@
     return any;
   }
 
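+  // Drops all buffered deletes, in RAM and on every segment; called
+  // after applyDeletes has materialized them.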
+  synchronized void clearDeletes() {
+    deletesInRAM.clear();
+    for (SegmentInfo info : segmentInfos) {
+      info.deletes.clear();
+    }
+  }
+  
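+  // Collects the deletes that apply to "info": its own buffered
+  // deletes, with their doc id limits intact, plus the deletes
+  // buffered on every later segment with no limit.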
+  synchronized BufferedDeletes getForwardDeletes(SegmentInfo info) {
+    int start = segmentInfos.indexOf(info);
+    BufferedDeletes dels = new BufferedDeletes(true);
+    dels.update(info.deletes);
+    for (int x=(start+1); x < segmentInfos.size(); x++) {
+      SegmentInfo si = segmentInfos.info(x);
+      dels.updateMaxLimit(si.deletes);
+    }
+    return dels;
+  }
+  
   // used only by assert
   private Term lastDeleteTerm;
 
@@ -1070,16 +1101,15 @@
 
   // Apply buffered delete terms, queries and docIDs to the
   // provided reader
-  private final synchronized boolean applyDeletes(IndexReader reader, int docIDStart)
+  private final synchronized boolean applyDeletes(BufferedDeletes deletes, IndexReader reader)
     throws CorruptIndexException, IOException {
 
-    final int docEnd = docIDStart + reader.maxDoc();
     boolean any = false;
 
     assert checkDeleteTerm(null);
 
     // Delete by term
-    if (deletesFlushed.terms.size() > 0) {
+    if (deletes.terms.size() > 0) {
       Fields fields = reader.fields();
       if (fields == null) {
         // This reader has no postings
@@ -1091,7 +1121,7 @@
       String currentField = null;
       DocsEnum docs = null;
         
-      for (Entry<Term, BufferedDeletes.Num> entry: deletesFlushed.terms.entrySet()) {
+      for (Entry<Term, BufferedDeletes.Num> entry: deletes.terms.entrySet()) {
         Term term = entry.getKey();
         // Since we visit terms sorted, we gain performance
         // by re-using the same TermsEnum and seeking only
@@ -1120,7 +1150,7 @@
             int limit = entry.getValue().getNum();
             while (true) {
               final int docID = docs.nextDoc();
-              if (docID == DocsEnum.NO_MORE_DOCS || docIDStart+docID >= limit) {
+              if (docID == DocsEnum.NO_MORE_DOCS || docID >= limit) {
                 break;
               }
               reader.deleteDocument(docID);
@@ -1132,19 +1162,17 @@
     }
 
     // Delete by docID
-    for (Integer docIdInt : deletesFlushed.docIDs) {
+    for (Integer docIdInt : deletes.docIDs) {
       int docID = docIdInt.intValue();
-      if (docID >= docIDStart && docID < docEnd) {
-        reader.deleteDocument(docID-docIDStart);
-        any = true;
-      }
+      reader.deleteDocument(docID);
+      any = true;
     }
 
     // Delete by query
-    if (deletesFlushed.queries.size() > 0) {
+    if (deletes.queries.size() > 0) {
       IndexSearcher searcher = new IndexSearcher(reader);
       try {
-        for (Entry<Query, Integer> entry : deletesFlushed.queries.entrySet()) {
+        for (Entry<Query, Integer> entry : deletes.queries.entrySet()) {
           Query query = entry.getKey();
           int limit = entry.getValue().intValue();
           Weight weight = query.weight(searcher);
@@ -1152,7 +1180,7 @@
           if (scorer != null) {
             while(true)  {
               int doc = scorer.nextDoc();
-              if (((long) docIDStart) + doc >= limit)
+              if (doc >= limit)
                 break;
               reader.deleteDocument(doc);
               any = true;
@@ -1172,7 +1200,7 @@
   // as the disk segments.
   synchronized private void addDeleteTerm(Term term, int docCount) {
     BufferedDeletes.Num num = deletesInRAM.terms.get(term);
-    final int docIDUpto = flushedDocCount + docCount;
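+    // Doc id limits are now segment-relative, so no flushedDocCount offset.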
+    final int docIDUpto = docCount;
     if (num == null)
       deletesInRAM.terms.put(term, new BufferedDeletes.Num(docIDUpto));
     else
@@ -1185,12 +1213,12 @@
   // Buffer a specific docID for deletion.  Currently only
   // used when we hit a exception when adding a document
   synchronized private void addDeleteDocID(int docID) {
-    deletesInRAM.docIDs.add(Integer.valueOf(flushedDocCount+docID));
+    deletesInRAM.docIDs.add(Integer.valueOf(docID));
     deletesInRAM.addBytesUsed(BYTES_PER_DEL_DOCID);
   }
 
   synchronized private void addDeleteQuery(Query query, int docID) {
-    deletesInRAM.queries.put(query, Integer.valueOf(flushedDocCount + docID));
+    deletesInRAM.queries.put(query, Integer.valueOf(docID));
     deletesInRAM.addBytesUsed(BYTES_PER_DEL_QUERY);
   }
 
@@ -1270,7 +1298,7 @@
   final SkipDocWriter skipDocWriter = new SkipDocWriter();
 
   long getRAMUsed() {
-    return bytesUsed() + deletesInRAM.bytesUsed + deletesFlushed.bytesUsed;
+    return bytesUsed() + deletesInRAM.bytesUsed;
   }
 
   NumberFormat nf = NumberFormat.getInstance();
@@ -1375,7 +1403,7 @@
         return;
       }
     
-      deletesRAMUsed = deletesInRAM.bytesUsed+deletesFlushed.bytesUsed;
+      deletesRAMUsed = deletesInRAM.bytesUsed;
       doBalance = bytesUsed() +deletesRAMUsed >= ramBufferSize;
     }
 
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 1037578)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -1114,6 +1114,7 @@
       docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates());
       docWriter.setInfoStream(infoStream);
       docWriter.setMaxFieldLength(maxFieldLength);
+      docWriter.pushSegmentInfos(segmentInfos);
 
       // Default deleter (for backwards compatibility) is
       // KeepOnlyLastCommitDeleter:
@@ -1778,7 +1779,11 @@
       if (!hitOOM) {
         commit(0);
       }
-
+      
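+      // Sanity check: no segment may still carry buffered deletes
+      // after the final commit.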
+      for (SegmentInfo info : segmentInfos) {
+        assert !info.deletes.any();
+      }
+      
       if (infoStream != null)
         message("at close: " + segString());
 
@@ -3239,7 +3244,11 @@
     } else if (infoStream != null) {
       message("commit: pendingCommit == null; skip");
     }
-
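+    // After a successful commit, all buffered deletes must be gone.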
+    assert !docWriter.hasDeletes();
+    for (SegmentInfo info : segmentInfos) {
+      assert !info.deletes.any();
+    } 
+    
     if (infoStream != null) {
       message("commit: done");
     }
@@ -3413,11 +3422,13 @@
         setDiagnostics(newSegment, "flush");
       }
 
-      docWriter.pushDeletes();
-
       if (flushDocs) {
         segmentInfos.add(newSegment);
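+        // Route the RAM-buffered deletes to the segment just flushed.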
+        docWriter.pushDeletes(newSegment);
         checkpoint();
+        docWriter.pushSegmentInfos(segmentInfos);
+      } else {
+        docWriter.pushDeletesLastSegment();
       }
 
       if (flushDocs && mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
@@ -3604,7 +3615,6 @@
     final int start = ensureContiguousMerge(merge);
 
     commitMergedDeletes(merge, mergedReader);
-    docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);
       
     // If the doc store we are using has been closed and
     // is in now compound format (but wasn't when we
@@ -3613,16 +3623,20 @@
     setMergeDocStoreIsCompoundFile(merge);
 
     merge.info.setHasProx(merger.hasProx());
-
-    segmentInfos.subList(start, start + merge.segments.size()).clear();
-    assert !segmentInfos.contains(merge.info);
-    segmentInfos.add(start, merge.info);
-
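+    // Fold buffered deletes into the merged segment and splice it into
+    // segmentInfos atomically with respect to the DocumentsWriter, so
+    // no deletes arrive between the two steps.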
+    synchronized (docWriter) {
+      docWriter.mergeDeletes(merge);
+    
+      segmentInfos.subList(start, start + merge.segments.size()).clear();
+      assert !segmentInfos.contains(merge.info);
+      segmentInfos.add(start, merge.info);
+    }
     closeMergeReaders(merge, false);
 
     // Must note the change to segmentInfos so any commits
     // in-flight don't lose it:
     checkpoint();
+    
+    docWriter.pushSegmentInfos(segmentInfos);
 
     // If the merged segments had pending changes, clear
     // them so that they don't bother writing them to
@@ -3814,7 +3828,7 @@
     if (merge.isAborted())
       return;
 
-    applyDeletes();
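+    // Apply buffered deletes just to the segments being merged, rather
+    // than flushing deletes across the whole index.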
+    docWriter.applyDeletes(merge.segments);
 
     final SegmentInfos sourceSegments = merge.segments;
     final int end = sourceSegments.size();
@@ -4286,14 +4300,17 @@
     boolean success = false;
     boolean changed;
     try {
-      changed = docWriter.applyDeletes(segmentInfos);
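+      // Apply and clear atomically under the DocumentsWriter lock so
+      // deletes buffered concurrently cannot be cleared unapplied.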
+      synchronized (docWriter) {
+        assert docWriter.segmentInfos.equals(segmentInfos);
+        changed = docWriter.applyDeletes(segmentInfos);
+        docWriter.clearDeletes();
+      }
       success = true;
     } finally {
       if (!success && infoStream != null) {
         message("hit exception flushing deletes");
       }
     }
-
     if (changed)
       checkpoint();
     return changed;
