Index: lucene/src/test/org/apache/lucene/index/TestDocWriterPerThread.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestDocWriterPerThread.java	(revision 0)
+++ lucene/src/test/org/apache/lucene/index/TestDocWriterPerThread.java	(revision 0)
@@ -0,0 +1,79 @@
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.index;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestDocWriterPerThread extends LuceneTestCase {
+  /**
+   * Index 10 documents from each of 4 concurrent threads, then verify the
+   * index holds 40 docs spread across 4 sub-readers (one segment per thread).
+   */
+  public void testSegmentPerThread() throws Exception {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    AddDocsThread[] addDocThreads = new AddDocsThread[4];
+    for (int x=0; x < addDocThreads.length; x++) {
+      addDocThreads[x] = new AddDocsThread(10, writer);
+      addDocThreads[x].start();
+    }
+    for (int x=0; x < addDocThreads.length; x++) {
+      addDocThreads[x].join();
+    }
+    IndexReader reader = writer.getReader();
+    assertEquals(40, reader.maxDoc());
+    assertEquals(4, reader.getSequentialSubReaders().length);
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+
+  public class AddDocsThread extends Thread {
+    int num;
+
+    IndexWriter writer;
+
+    public AddDocsThread(int num, IndexWriter writer) {
+      this.num = num;
+      this.writer = writer;
+    }
+
+    public void run() {
+      try {
+        String indexName = "perthread";
+        for (int x = 0; x < num; x++) {
+          Document doc = new Document();
+          doc.add(new Field("id", Integer.toString(x), Store.YES,
+              Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+          doc.add(new Field("indexname", indexName, Store.YES,
+              Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+          writer.addDocument(doc);
+        }
+      } catch (Throwable th) {
+        th.printStackTrace(); // TODO: record the failure and rethrow in the test thread; as written, errors here can never fail the test
+      }
+    }
+  }
+}
Index: lucene/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java	(revision 927524)
+++ lucene/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java	(working copy)
@@ -113,8 +113,8 @@
           else
             lastPerField.next = perField.next;
 
-          if (state.docWriter.infoStream != null)
-            state.docWriter.infoStream.println("  purge field=" + perField.fieldInfo.name);
+          if (state.docWriter.writer.infoStream != null)
+            state.docWriter.writer.infoStream.println("  purge field=" + perField.fieldInfo.name);
 
           totalFieldCount--;
 
Index: lucene/src/java/org/apache/lucene/index/DocumentsWriterThreadState.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DocumentsWriterThreadState.java	(revision 927524)
+++ lucene/src/java/org/apache/lucene/index/DocumentsWriterThreadState.java	(working copy)
@@ -36,9 +36,9 @@
   public DocumentsWriterThreadState(DocumentsWriter docWriter) throws IOException {
     this.docWriter = docWriter;
     docState = new DocumentsWriter.DocState();
-    docState.maxFieldLength = docWriter.maxFieldLength;
-    docState.infoStream = docWriter.infoStream;
-    docState.similarity = docWriter.similarity;
+    docState.maxFieldLength = docWriter.writer.config.getMaxFieldLength();
+    docState.infoStream = docWriter.writer.infoStream;
+    docState.similarity = docWriter.writer.config.getSimilarity();
     docState.docWriter = docWriter;
     consumer = docWriter.consumer.addThread(this);
   }
Index: lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 927524)
+++ lucene/src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -22,10 +22,9 @@
 import java.text.NumberFormat;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -40,8 +39,8 @@
 import org.apache.lucene.store.RAMFile;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Constants;
-import org.apache.lucene.util.ThreadInterruptedException;
 import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.util.ThreadInterruptedException;
 
 /**
  * This class accepts multiple added documents and directly
@@ -123,11 +122,8 @@
   private int numDocsInRAM;                       // # docs buffered in RAM
   int numDocsInStore;                     // # docs written to doc stores
 
-  // Max # ThreadState instances; if there are more threads
-  // than this they share ThreadStates
-  private DocumentsWriterThreadState[] threadStates = new DocumentsWriterThreadState[0];
-  private final HashMap<Thread,DocumentsWriterThreadState> threadBindings = new HashMap<Thread,DocumentsWriterThreadState>();
-
+  // Sole thread state: each DocumentsWriter instance now serves exactly one thread (see IndexWriter.getDocWriterForThread).
+  DocumentsWriterThreadState docWriterThreadState;
   private int pauseThreads;               // Non-zero when we need all threads to
                                           // pause (eg to flush)
   boolean flushPending;                   // True when a thread has decided to flush
@@ -136,16 +132,22 @@
 
   private DocFieldProcessor docFieldProcessor;
 
-  PrintStream infoStream;
-  int maxFieldLength = IndexWriterConfig.UNLIMITED_FIELD_LENGTH;
-  Similarity similarity;
-
   // max # simultaneous threads; if there are more than
   // this, they wait for others to finish first
   private final int maxThreadStates;
 
   List<String> newFiles;
-
+  int flushedDocCount;
+  int localFlushedDocCount;
+  
+  void restoreFlushedDocCount() {
+    flushedDocCount = localFlushedDocCount;
+  }
+  
+  void saveFlushedDocCount() {
+    localFlushedDocCount = flushedDocCount;
+  }
+  
   static class DocState {
     DocumentsWriter docWriter;
     Analyzer analyzer;
@@ -270,11 +272,7 @@
   // Deletes done before the last flush; these are still
   // kept on abort
   private BufferedDeletes deletesFlushed = new BufferedDeletes(true);
-
-  // The max number of delete terms that can be buffered before
-  // they must be flushed to disk.
-  private int maxBufferedDeleteTerms = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS;
-
+  
   // How much RAM we can use before flushing.  This is 0 if
   // we are flushing by doc count instead.
   private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
@@ -290,31 +288,31 @@
   // non-zero we will flush by RAM usage instead.
   private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
 
-  private int flushedDocCount;                      // How many docs already flushed to index
-
-  synchronized void updateFlushedDocCount(int n) {
-    flushedDocCount += n;
-  }
+  //synchronized void updateFlushedDocCount(int n) {
+  //  flushedDocCount += n;
+  //}
   synchronized int getFlushedDocCount() {
     return flushedDocCount;
   }
-  synchronized void setFlushedDocCount(int n) {
-    flushedDocCount = n;
-  }
+  //synchronized void setFlushedDocCount(int n) {
+  //  flushedDocCount = n;
+  //}
 
   private boolean closed;
 
   DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates) throws IOException {
     this.directory = directory;
     this.writer = writer;
-    this.similarity = writer.getConfig().getSimilarity();
     this.maxThreadStates = maxThreadStates;
-    flushedDocCount = writer.maxDoc();
-
+    
+    // turn off having a set ram buffer size
+    setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+    
     consumer = indexingChain.getChain(this);
     if (consumer instanceof DocFieldProcessor) {
       docFieldProcessor = (DocFieldProcessor) consumer;
     }
+    this.docWriterThreadState = new DocumentsWriterThreadState(this);
   }
 
   /** Returns true if any of the fields in the current
@@ -323,29 +321,9 @@
     return (docFieldProcessor != null) ? docFieldProcessor.fieldInfos.hasProx()
                                        : true;
   }
-
-  /** If non-null, various details of indexing are printed
-   *  here. */
-  synchronized void setInfoStream(PrintStream infoStream) {
-    this.infoStream = infoStream;
-    for(int i=0;i<threadStates.length;i++)
-      threadStates[i].docState.infoStream = infoStream;
-  }
-
-  synchronized void setMaxFieldLength(int maxFieldLength) {
-    this.maxFieldLength = maxFieldLength;
-    for(int i=0;i<threadStates.length;i++)
-      threadStates[i].docState.maxFieldLength = maxFieldLength;
-  }
-
-  synchronized void setSimilarity(Similarity similarity) {
-    this.similarity = similarity;
-    for(int i=0;i<threadStates.length;i++)
-      threadStates[i].docState.similarity = similarity;
-  }
-
+  
   /** Set how much RAM we can use before flushing. */
-  synchronized void setRAMBufferSizeMB(double mb) {
+  private synchronized void setRAMBufferSizeMB(double mb) {
     if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
       ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
       waitQueuePauseBytes = 4*1024*1024;
@@ -366,7 +344,11 @@
       return ramBufferSize/1024./1024.;
     }
   }
-
+  
+  void setFlushedDocCount(int flushedDocCount) { // FIXME(review): unsynchronized write, but getFlushedDocCount() is synchronized -- confirm cross-thread visibility is safe
+    this.flushedDocCount = flushedDocCount;
+  }
+  
   /** Set max buffered docs, which means we will flush by
    *  doc count instead of by RAM usage. */
   void setMaxBufferedDocs(int count) {
@@ -406,7 +388,7 @@
     
     assert allThreadsIdle();
 
-    if (infoStream != null)
+    if (writer.infoStream != null)
       message("closeDocStore: " + openFiles.size() + " files to flush to segment " + docStoreSegment + " numDocs=" + numDocsInStore);
     
     boolean success = false;
@@ -440,7 +422,7 @@
   }
 
   void message(String message) {
-    if (infoStream != null)
+    if (writer.infoStream != null)
       writer.message("DW: " + message);
   }
 
@@ -481,7 +463,7 @@
   synchronized void abort() throws IOException {
 
     try {
-      if (infoStream != null) {
+      if (writer.infoStream != null) {
         message("docWriter: now abort");
       }
 
@@ -507,13 +489,9 @@
         deletesInRAM.clear();
 
         openFiles.clear();
+        
+        docWriterThreadState.consumer.abort();
 
-        for(int i=0;i<threadStates.length;i++)
-          try {
-            threadStates[i].consumer.abort();
-          } catch (Throwable t) {
-          }
-
         try {
           consumer.abort();
         } catch (Throwable t) {
@@ -532,7 +510,7 @@
     } finally {
       aborting = false;
       notifyAll();
-      if (infoStream != null) {
+      if (writer.infoStream != null) {
         message("docWriter: done abort");
       }
     }
@@ -542,15 +520,13 @@
   private void doAfterFlush() throws IOException {
     // All ThreadStates should be idle when we are called
     assert allThreadsIdle();
-    threadBindings.clear();
     waitQueue.reset();
     segment = null;
     numDocsInRAM = 0;
     nextDocID = 0;
     bufferIsFull = false;
     flushPending = false;
-    for(int i=0;i<threadStates.length;i++)
-      threadStates[i].doAfterFlush();
+    docWriterThreadState.doAfterFlush();
     numBytesUsed = 0;
   }
 
@@ -576,9 +552,7 @@
   }
 
   private synchronized boolean allThreadsIdle() {
-    for(int i=0;i<threadStates.length;i++)
-      if (!threadStates[i].isIdle)
-        return false;
+    if (!docWriterThreadState.isIdle) return false;
     return true;
   }
 
@@ -609,7 +583,7 @@
 
     docStoreOffset = numDocsInStore;
 
-    if (infoStream != null)
+    if (writer.infoStream != null)
       message("flush postings as segment " + flushState.segmentName + " numDocs=" + numDocsInRAM);
     
     boolean success = false;
@@ -624,11 +598,10 @@
       }
 
       Collection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
-      for(int i=0;i<threadStates.length;i++)
-        threads.add(threadStates[i].consumer);
+      threads.add(docWriterThreadState.consumer);
       consumer.flush(threads, flushState);
 
-      if (infoStream != null) {
+      if (writer.infoStream != null) {
         SegmentInfo si = new SegmentInfo(flushState.segmentName, flushState.numDocs, directory);
         final long newSegmentSize = si.sizeInBytes();
         String message = "  oldRAMSize=" + numBytesUsed +
@@ -637,9 +610,9 @@
           " new/old=" + nf.format(100.0*newSegmentSize/numBytesUsed) + "%";
         message(message);
       }
+      
+      writer.updateFlushedDocCount(flushState.numDocs);
 
-      flushedDocCount += flushState.numDocs;
-
       doAfterFlush();
 
       success = true;
@@ -710,55 +683,29 @@
    * buffer this deleted term after the thread state has
    * been acquired. */
   synchronized DocumentsWriterThreadState getThreadState(Document doc, Term delTerm) throws IOException {
-
-    // First, find a thread state.  If this thread already
-    // has affinity to a specific ThreadState, use that one
-    // again.
-    DocumentsWriterThreadState state = threadBindings.get(Thread.currentThread());
-    if (state == null) {
-
-      // First time this thread has called us since last
-      // flush.  Find the least loaded thread state:
-      DocumentsWriterThreadState minThreadState = null;
-      for(int i=0;i<threadStates.length;i++) {
-        DocumentsWriterThreadState ts = threadStates[i];
-        if (minThreadState == null || ts.numThreads < minThreadState.numThreads)
-          minThreadState = ts;
-      }
-      if (minThreadState != null && (minThreadState.numThreads == 0 || threadStates.length >= maxThreadStates)) {
-        state = minThreadState;
-        state.numThreads++;
-      } else {
-        // Just create a new "private" thread state
-        DocumentsWriterThreadState[] newArray = new DocumentsWriterThreadState[1+threadStates.length];
-        if (threadStates.length > 0)
-          System.arraycopy(threadStates, 0, newArray, 0, threadStates.length);
-        state = newArray[threadStates.length] = new DocumentsWriterThreadState(this);
-        threadStates = newArray;
-      }
-      threadBindings.put(Thread.currentThread(), state);
-    }
-
+    docWriterThreadState.docState.infoStream = writer.infoStream;
+    docWriterThreadState.docState.maxFieldLength = writer.config.getMaxFieldLength();
+    docWriterThreadState.docState.similarity = writer.config.getSimilarity();
     // Next, wait until my thread state is idle (in case
     // it's shared with other threads) and for threads to
     // not be paused nor a flush pending:
-    waitReady(state);
+    waitReady(docWriterThreadState);
 
     // Allocate segment name if this is the first doc since
     // last flush:
     initSegmentName(false);
 
-    state.isIdle = false;
+    docWriterThreadState.isIdle = false;
 
     boolean success = false;
     try {
-      state.docState.docID = nextDocID;
+      docWriterThreadState.docState.docID = nextDocID;
 
       assert writer.testPoint("DocumentsWriter.ThreadState.init start");
 
       if (delTerm != null) {
-        addDeleteTerm(delTerm, state.docState.docID);
-        state.doFlushAfter = timeToFlushDeletes();
+        addDeleteTerm(delTerm, docWriterThreadState.docState.docID);
+        docWriterThreadState.doFlushAfter = timeToFlushDeletes();
       }
 
       assert writer.testPoint("DocumentsWriter.ThreadState.init after delTerm");
@@ -773,23 +720,23 @@
           maxBufferedDocs != IndexWriterConfig.DISABLE_AUTO_FLUSH
           && numDocsInRAM >= maxBufferedDocs) {
         flushPending = true;
-        state.doFlushAfter = true;
+        docWriterThreadState.doFlushAfter = true;
       }
 
       success = true;
     } finally {
       if (!success) {
         // Forcefully idle this ThreadState:
-        state.isIdle = true;
+        docWriterThreadState.isIdle = true;
         notifyAll();
-        if (state.doFlushAfter) {
-          state.doFlushAfter = false;
+        if (docWriterThreadState.doFlushAfter) {
+          docWriterThreadState.doFlushAfter = false;
           flushPending = false;
         }
       }
     }
 
-    return state;
+    return docWriterThreadState;
   }
 
   /** Returns true if the caller (IndexWriter) should now
@@ -881,6 +828,7 @@
   }
 
   /** Called whenever a merge has completed and the merged segments had deletions */
+  /* NOTE(review): disabled -- deletes now buffer per-writer docIDs (see addDeleteTerm), so remapping appears unnecessary; remove this dead code before commit. Plain block comment (not /**) so Javadoc does not attach it to waitReady below.
   synchronized void remapDeletes(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount) {
     if (docMaps == null)
       // The merged segments had no deletes so docIDs did not change and we have nothing to do
@@ -888,9 +836,9 @@
     MergeDocIDRemapper mapper = new MergeDocIDRemapper(infos, docMaps, delCounts, merge, mergeDocCount);
     deletesInRAM.remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
     deletesFlushed.remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
-    flushedDocCount -= mapper.docShift;
+    //writer.updateFlushedDocCount(-mapper.docShift);
   }
-
+  */
   synchronized private void waitReady(DocumentsWriterThreadState state) {
 
     while (!closed && ((state != null && !state.isIdle) || pauseThreads != 0 || flushPending || aborting)) {
@@ -932,6 +880,7 @@
   }
 
   synchronized boolean deletesFull() {
+    int maxBufferedDeleteTerms = writer.config.getMaxBufferedDeleteTerms();
     return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
             (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed + numBytesUsed) >= ramBufferSize) ||
       (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
@@ -947,6 +896,7 @@
     // too-frequent flushing of a long tail of tiny segments
     // when merges (which always apply deletes) are
     // infrequent.
+    int maxBufferedDeleteTerms = writer.config.getMaxBufferedDeleteTerms();
     return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
             (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed) >= ramBufferSize/2) ||
       (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
@@ -957,14 +907,6 @@
     return (bufferIsFull || deletesFull()) && setFlushPending();
   }
 
-  void setMaxBufferedDeleteTerms(int maxBufferedDeleteTerms) {
-    this.maxBufferedDeleteTerms = maxBufferedDeleteTerms;
-  }
-
-  int getMaxBufferedDeleteTerms() {
-    return maxBufferedDeleteTerms;
-  }
-
   synchronized boolean hasDeletes() {
     return deletesFlushed.any();
   }
@@ -976,7 +918,7 @@
 
     final long t0 = System.currentTimeMillis();
 
-    if (infoStream != null)
+    if (writer.infoStream != null)
       message("apply " + deletesFlushed.numTerms + " buffered deleted terms and " +
               deletesFlushed.docIDs.size() + " deleted docIDs and " +
               deletesFlushed.queries.size() + " deleted queries on " +
@@ -1002,7 +944,7 @@
     }
 
     deletesFlushed.clear();
-    if (infoStream != null) {
+    if (writer.infoStream != null) {
       message("apply deletes took " + (System.currentTimeMillis()-t0) + " msec");
     }
 
@@ -1096,7 +1038,7 @@
   // as the disk segments.
   synchronized private void addDeleteTerm(Term term, int docCount) {
     BufferedDeletes.Num num = deletesInRAM.terms.get(term);
-    final int docIDUpto = flushedDocCount + docCount;
+    final int docIDUpto = docCount;
     if (num == null)
       deletesInRAM.terms.put(term, new BufferedDeletes.Num(docIDUpto));
     else
@@ -1109,12 +1051,12 @@
   // Buffer a specific docID for deletion.  Currently only
   // used when we hit a exception when adding a document
   synchronized private void addDeleteDocID(int docID) {
-    deletesInRAM.docIDs.add(Integer.valueOf(flushedDocCount+docID));
+    deletesInRAM.docIDs.add(Integer.valueOf(docID));
     deletesInRAM.addBytesUsed(BYTES_PER_DEL_DOCID);
   }
 
   synchronized private void addDeleteQuery(Query query, int docID) {
-    deletesInRAM.queries.put(query, Integer.valueOf(flushedDocCount + docID));
+    deletesInRAM.queries.put(query, Integer.valueOf(docID));
     deletesInRAM.addBytesUsed(BYTES_PER_DEL_QUERY);
   }
 
@@ -1399,7 +1341,7 @@
 
     if (numBytesAlloc+deletesRAMUsed > freeTrigger) {
 
-      if (infoStream != null)
+      if (writer.infoStream != null)
         message("  RAM: now balance allocations: usedMB=" + toMB(numBytesUsed) +
                 " vs trigger=" + toMB(flushTrigger) +
                 " allocMB=" + toMB(numBytesAlloc) +
@@ -1429,7 +1371,7 @@
               && !any) {
             // Nothing else to free -- must flush now.
             bufferIsFull = numBytesUsed+deletesRAMUsed > flushTrigger;
-            if (infoStream != null) {
+            if (writer.infoStream != null) {
               if (numBytesUsed > flushTrigger)
                 message("    nothing to free; now set bufferIsFull");
               else
@@ -1473,7 +1415,7 @@
         iter++;
       }
 
-      if (infoStream != null)
+      if (writer.infoStream != null)
         message("    after free: freedMB=" + nf.format((startBytesAlloc-numBytesAlloc-deletesRAMUsed)/1024./1024.) + " usedMB=" + nf.format((numBytesUsed+deletesRAMUsed)/1024./1024.) + " allocMB=" + nf.format(numBytesAlloc/1024./1024.));
       
     } else {
@@ -1485,7 +1427,7 @@
       synchronized(this) {
 
         if (numBytesUsed+deletesRAMUsed > flushTrigger) {
-          if (infoStream != null)
+          if (writer.infoStream != null)
             message("  RAM: now flush @ usedMB=" + nf.format(numBytesUsed/1024./1024.) +
                     " allocMB=" + nf.format(numBytesAlloc/1024./1024.) +
                     " deletesMB=" + nf.format(deletesRAMUsed/1024./1024.) +
Index: lucene/src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexWriter.java	(revision 927524)
+++ lucene/src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -17,33 +17,35 @@
  * limitations under the License.
  */
 
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.ThreadInterruptedException;
 import org.apache.lucene.util.Version;
 
-import java.io.IOException;
-import java.io.Closeable;
-import java.io.PrintStream;
-import java.util.List;
-import java.util.Collection;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.Iterator;
-import java.util.Map;
-
 /**
   An <code>IndexWriter</code> creates and maintains an index.
 
@@ -269,11 +271,9 @@
   volatile long pendingCommitChangeCount;
 
   private SegmentInfos localRollbackSegmentInfos;      // segmentInfos we will fallback to if the commit fails
-  private int localFlushedDocCount;               // saved docWriter.getFlushedDocCount during local transaction
 
   private SegmentInfos segmentInfos = new SegmentInfos();       // the segments
 
-  private DocumentsWriter docWriter;
   private IndexFileDeleter deleter;
 
   private Set<SegmentInfo> segmentsToOptimize = new HashSet<SegmentInfo>();           // used by optimize to note those needing optimization
@@ -301,6 +301,9 @@
 
   private int flushCount;
   private int flushDeletesCount;
+  // the total of all flushed documents across
+  // document writers
+  private int flushedDocCount;
 
   // Used to only allow one addIndexes to proceed at once
   // TODO: use ReadWriteLock once we are on 5.0
@@ -308,6 +311,7 @@
   private Thread writeThread;                     // non-null if any thread holds write lock
   final ReaderPool readerPool = new ReaderPool();
   private int upgradeCount;
+  private Map<Thread,DocumentsWriter> docWriters = new HashMap<Thread,DocumentsWriter>(); 
   
   // This is a "write once" variable (like the organic dye
   // on a DVD-R that may or may not be heated by a laser and
@@ -322,7 +326,7 @@
 
   // The instance that was passed to the constructor. It is saved only in order
   // to allow users to query an IndexWriter settings.
-  private final IndexWriterConfig config;
+  final IndexWriterConfig config;
 
   /**
    * Expert: returns a readonly reader, covering all
@@ -413,7 +417,7 @@
     // this method is called:
     poolReaders = true;
 
-    flush(true, true, false);
+    flushAll(true, true, false);
     
     // Prevent segmentInfos from changing while opening the
     // reader; in theory we could do similar retry logic,
@@ -688,6 +692,19 @@
     }
   }
   
+  /**
+   * Returns the doc writer for this thread. 
+   */
+  synchronized DocumentsWriter getDocWriterForThread() throws IOException {
+    Thread currentThread = Thread.currentThread();
+    DocumentsWriter docWriter = docWriters.get(currentThread);
+    if (docWriter == null) {
+      docWriter = new DocumentsWriter(directory, this, config.getIndexingChain(), 1);
+      docWriters.put(currentThread, docWriter);
+    }
+    return docWriter;
+  }
+  
   synchronized void acquireWrite() {
     assert writeThread != Thread.currentThread();
     while(writeThread != null || readCount > 0)
@@ -830,7 +847,6 @@
   public void setSimilarity(Similarity similarity) {
     ensureOpen();
     this.similarity = similarity;
-    docWriter.setSimilarity(similarity);
     // Required so config.getSimilarity returns the right value. But this will
     // go away together with the method in 4.0.
     config.setSimilarity(similarity);
@@ -1151,15 +1167,11 @@
 
       setRollbackSegmentInfos(segmentInfos);
 
-      docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates());
-      docWriter.setInfoStream(infoStream);
-      docWriter.setMaxFieldLength(maxFieldLength);
-
       // Default deleter (for backwards compatibility) is
       // KeepOnlyLastCommitDeleter:
       deleter = new IndexFileDeleter(directory,
                                      conf.getIndexDeletionPolicy(),
-                                     segmentInfos, infoStream, docWriter);
+                                     segmentInfos, infoStream, this);
 
       if (deleter.startingCommitDeleted)
         // Deletion policy deleted the "head" commit point.
@@ -1168,9 +1180,6 @@
         // segments_N file.
         changeCount++;
 
-      docWriter.setMaxBufferedDeleteTerms(conf.getMaxBufferedDeleteTerms());
-      docWriter.setRAMBufferSizeMB(conf.getRAMBufferSizeMB());
-      docWriter.setMaxBufferedDocs(conf.getMaxBufferedDocs());
       pushMaxBufferedDocs();
 
       if (infoStream != null) {
@@ -1184,6 +1193,18 @@
     }
   }
   
+  synchronized Collection<DocumentsWriter> getDocWriters() {
+    return new ArrayList<DocumentsWriter>(docWriters.values());
+  }
+  
+  synchronized List<String> getDocWriterFiles() {
+    List<String> files = new ArrayList<String>();
+    for (DocumentsWriter docWriter : getDocWriters()) {
+      files.addAll(docWriter.openFiles());
+    }
+    return files;
+  }
+  
   private synchronized void setRollbackSegmentInfos(SegmentInfos infos) {
     rollbackSegmentInfos = (SegmentInfos) infos.clone();
     assert !rollbackSegmentInfos.hasExternalSegments(directory);
@@ -1330,7 +1351,6 @@
   public void setMaxFieldLength(int maxFieldLength) {
     ensureOpen();
     this.maxFieldLength = maxFieldLength;
-    docWriter.setMaxFieldLength(maxFieldLength);
     if (infoStream != null)
       message("setMaxFieldLength " + maxFieldLength);
     // Required so config.getMaxFieldLength returns the right value. But this
@@ -1378,7 +1398,6 @@
         && getRAMBufferSizeMB() == DISABLE_AUTO_FLUSH)
       throw new IllegalArgumentException(
           "at least one of ramBufferSize and maxBufferedDocs must be enabled");
-    docWriter.setMaxBufferedDocs(maxBufferedDocs);
     pushMaxBufferedDocs();
     if (infoStream != null)
       message("setMaxBufferedDocs " + maxBufferedDocs);
@@ -1393,11 +1412,11 @@
    * as its minMergeDocs, to keep backwards compatibility.
    */
   private void pushMaxBufferedDocs() {
-    if (docWriter.getMaxBufferedDocs() != DISABLE_AUTO_FLUSH) {
+    if (config.getMaxBufferedDocs() != DISABLE_AUTO_FLUSH) {
       final MergePolicy mp = mergePolicy;
       if (mp instanceof LogDocMergePolicy) {
         LogDocMergePolicy lmp = (LogDocMergePolicy) mp;
-        final int maxBufferedDocs = docWriter.getMaxBufferedDocs();
+        final int maxBufferedDocs = config.getMaxBufferedDocs();
         if (lmp.getMinMergeDocs() != maxBufferedDocs) {
           if (infoStream != null)
             message("now push maxBufferedDocs " + maxBufferedDocs + " to LogDocMergePolicy");
@@ -1415,7 +1434,7 @@
    */
   public int getMaxBufferedDocs() {
     ensureOpen();
-    return docWriter.getMaxBufferedDocs();
+    return config.getMaxBufferedDocs();
   }
 
   /** Determines the amount of RAM that may be used for
@@ -1467,7 +1486,6 @@
     if (mb == DISABLE_AUTO_FLUSH && getMaxBufferedDocs() == DISABLE_AUTO_FLUSH)
       throw new IllegalArgumentException(
           "at least one of ramBufferSize and maxBufferedDocs must be enabled");
-    docWriter.setRAMBufferSizeMB(mb);
     if (infoStream != null)
       message("setRAMBufferSizeMB " + mb);
     // Required so config.getRAMBufferSizeMB returns the right value. But this
@@ -1480,7 +1498,11 @@
    * @deprecated use {@link IndexWriterConfig#getRAMBufferSizeMB()} instead.
    */
   public double getRAMBufferSizeMB() {
-    return docWriter.getRAMBufferSizeMB();
+    double total = 0.0;
+    for (DocumentsWriter docWriter : getDocWriters()) {
+      total += docWriter.getRAMBufferSizeMB();
+    }
+    return total;
   }
 
   /**
@@ -1502,7 +1524,6 @@
         && maxBufferedDeleteTerms < 1)
       throw new IllegalArgumentException(
           "maxBufferedDeleteTerms must at least be 1 when enabled");
-    docWriter.setMaxBufferedDeleteTerms(maxBufferedDeleteTerms);
     if (infoStream != null)
       message("setMaxBufferedDeleteTerms " + maxBufferedDeleteTerms);
     // Required so config.getMaxBufferedDeleteTerms returns the right value. But
@@ -1518,7 +1539,7 @@
    */
   public int getMaxBufferedDeleteTerms() {
     ensureOpen();
-    return docWriter.getMaxBufferedDeleteTerms();
+    return config.getMaxBufferedDeleteTerms();
   }
 
   /** Determines how often segment indices are merged by addDocument().  With
@@ -1582,7 +1603,6 @@
   public void setInfoStream(PrintStream infoStream) {
     ensureOpen();
     setMessageID(infoStream);
-    docWriter.setInfoStream(infoStream);
     deleter.setInfoStream(infoStream);
     if (infoStream != null)
       messageState();
@@ -1756,21 +1776,25 @@
   }
 
   private void closeInternal(boolean waitForMerges) throws CorruptIndexException, IOException {
-
-    docWriter.pauseAllThreads();
-
+    for (DocumentsWriter docWriter : getDocWriters()) {
+      docWriter.pauseAllThreads();
+    }
     try {
       if (infoStream != null)
         message("now flush at close");
 
-      docWriter.close();
-
       // Only allow a new merge to be triggered if we are
       // going to wait for merges:
       if (!hitOOM) {
-        flush(waitForMerges, true, true);
+        // flush the buffer of each doc writer
+        flushAll(waitForMerges, true, true);
       }
-
+      
+      // close each doc writer
+      for (DocumentsWriter docWriter : docWriters.values()) {
+        docWriter.close();
+      }
+      
       if (waitForMerges)
         // Give merge scheduler last chance to run, in case
         // any pending merges are waiting:
@@ -1795,7 +1819,7 @@
 
       synchronized(this) {
         readerPool.close();
-        docWriter = null;
+        //docWriter = null;
         deleter.close();
       }
       
@@ -1813,8 +1837,9 @@
         closing = false;
         notifyAll();
         if (!closed) {
-          if (docWriter != null)
+          for (DocumentsWriter docWriter : docWriters.values()) {
             docWriter.resumeAllThreads();
+          }
           if (infoStream != null)
             message("hit exception while closing");
         }
@@ -1822,76 +1847,6 @@
     }
   }
 
-  /** Tells the docWriter to close its currently open shared
-   *  doc stores (stored fields & vectors files).
-   *  Return value specifices whether new doc store files are compound or not.
-   */
-  private synchronized boolean flushDocStores() throws IOException {
-
-    boolean useCompoundDocStore = false;
-
-    String docStoreSegment;
-
-    boolean success = false;
-    try {
-      docStoreSegment = docWriter.closeDocStore();
-      success = true;
-    } finally {
-      if (!success && infoStream != null) {
-        message("hit exception closing doc store segment");
-      }
-    }
-
-    useCompoundDocStore = mergePolicy.useCompoundDocStore(segmentInfos);
-      
-    if (useCompoundDocStore && docStoreSegment != null && docWriter.closedFiles().size() != 0) {
-      // Now build compound doc store file
-
-      if (infoStream != null) {
-        message("create compound file " + IndexFileNames.segmentFileName(docStoreSegment, IndexFileNames.COMPOUND_FILE_STORE_EXTENSION));
-      }
-
-      success = false;
-
-      final int numSegments = segmentInfos.size();
-      final String compoundFileName = IndexFileNames.segmentFileName(docStoreSegment, IndexFileNames.COMPOUND_FILE_STORE_EXTENSION);
-
-      try {
-        CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName);
-        for (final String file :  docWriter.closedFiles() ) {
-          cfsWriter.addFile(file);
-        }
-      
-        // Perform the merge
-        cfsWriter.close();
-        success = true;
-
-      } finally {
-        if (!success) {
-          if (infoStream != null)
-            message("hit exception building compound file doc store for segment " + docStoreSegment);
-          deleter.deleteFile(compoundFileName);
-          docWriter.abort();
-        }
-      }
-
-      for(int i=0;i<numSegments;i++) {
-        SegmentInfo si = segmentInfos.info(i);
-        if (si.getDocStoreOffset() != -1 &&
-            si.getDocStoreSegment().equals(docStoreSegment))
-          si.setDocStoreIsCompoundFile(true);
-      }
-
-      checkpoint();
-
-      // In case the files we just merged into a CFS were
-      // not previously checkpointed:
-      deleter.deleteNewFiles(docWriter.closedFiles());
-    }
-
-    return useCompoundDocStore;
-  }
-
   /** Returns the Directory used by this index. */
   public Directory getDirectory() {     
     // Pass false because the flush during closing calls getDirectory
@@ -1910,11 +1865,10 @@
    *  not counting deletions.
    *  @see #numDocs */
   public synchronized int maxDoc() {
-    int count;
-    if (docWriter != null)
-      count = docWriter.getNumDocsInRAM();
-    else
-      count = 0;
+    int count = 0;
+    for (DocumentsWriter docWriter : docWriters.values()) {
+      count += docWriter.getNumDocsInRAM();
+    }
 
     for (int i = 0; i < segmentInfos.size(); i++)
       count += segmentInfos.info(i).docCount;
@@ -1928,11 +1882,10 @@
    *  counted you should call {@link #commit()} first.
    *  @see #numDocs */
   public synchronized int numDocs() throws IOException {
-    int count;
-    if (docWriter != null)
-      count = docWriter.getNumDocsInRAM();
-    else
-      count = 0;
+    int count = 0;
+    for (DocumentsWriter docWriter : docWriters.values()) {
+      count += docWriter.getNumDocsInRAM();
+    }
 
     for (int i = 0; i < segmentInfos.size(); i++) {
       final SegmentInfo info = segmentInfos.info(i);
@@ -1943,8 +1896,9 @@
 
   public synchronized boolean hasDeletions() throws IOException {
     ensureOpen();
-    if (docWriter.hasDeletes())
-      return true;
+    for (DocumentsWriter docWriter : docWriters.values()) {
+      if (docWriter.hasDeletes()) return true;
+    }
     for (int i = 0; i < segmentInfos.size(); i++)
       if (segmentInfos.info(i).hasDeletions())
         return true;
@@ -2034,11 +1988,11 @@
    */
   public void addDocument(Document doc, Analyzer analyzer) throws CorruptIndexException, IOException {
     ensureOpen();
-    boolean doFlush = false;
     boolean success = false;
     try {
+      DocumentsWriter docWriter = getDocWriterForThread();
       try {
-        doFlush = docWriter.addDocument(doc, analyzer);
+        docWriter.addDocument(doc, analyzer);
         success = true;
       } finally {
         if (!success) {
@@ -2057,14 +2011,57 @@
           }
         }
       }
-      if (doFlush)
-        flush(true, false, false);
+      flushIfRAMExceeded();
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "addDocument");
     }
   }
-
+  
+  private void flushIfRAMExceeded() throws IOException {
+    if (ifFlushThread()) {
+      flush(true, true, true);
+    }
+  }
+  
   /**
+   * If all of the doc writer's RAM usage exceeds
+   * the max buffer size, flush the DW with the greatest
+   * usage.
+   */
+  private boolean ifFlushThread() throws IOException {
+    long ramUsage = getRAMUsage();
+    if (ramUsage > config.getRAMBufferSizeMB()*1024*1024) {
+      DocumentsWriter docWriter = getDWGreatestRAMUsage();
+      DocumentsWriter threadDocWriter = getDocWriterForThread();
+      if (docWriter == threadDocWriter) {
+        return true;
+      }
+    }
+    return false;
+  }
+  
+  private DocumentsWriter getDWGreatestRAMUsage() {
+    Collection<DocumentsWriter> dws = getDocWriters();
+    return Collections.max(dws, new DWSizeComparator());
+  }
+  
+  static class DWSizeComparator implements Comparator<DocumentsWriter> {
+    public int compare(DocumentsWriter dw1, DocumentsWriter dw2) {
+      long ram1 = dw1.getRAMUsed();
+      long ram2 = dw2.getRAMUsed();
+      return (ram1 < ram2 ? -1 : (ram1 == ram2 ? 0 : 1));
+    }
+  }
+  
+  synchronized long getRAMUsage() {
+    long total = 0;
+    for (DocumentsWriter docWriter : docWriters.values()) {
+      total += docWriter.getRAMUsed();
+    }
+    return total;
+  }
+  
+  /**
    * Deletes the document(s) containing <code>term</code>.
    *
    * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
@@ -2077,10 +2074,10 @@
    */
   public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
     ensureOpen();
+    DocumentsWriter docWriter = getDocWriterForThread();
     try {
-      boolean doFlush = docWriter.bufferDeleteTerm(term);
-      if (doFlush)
-        flush(true, false, false);
+      docWriter.bufferDeleteTerm(term);
+      flushIfRAMExceeded();
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "deleteDocuments(Term)");
     }
@@ -2101,10 +2098,10 @@
    */
   public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
     ensureOpen();
+    DocumentsWriter docWriter = getDocWriterForThread();
     try {
-      boolean doFlush = docWriter.bufferDeleteTerms(terms);
-      if (doFlush)
-        flush(true, false, false);
+      docWriter.bufferDeleteTerms(terms);
+      flushIfRAMExceeded();
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "deleteDocuments(Term..)");
     }
@@ -2123,9 +2120,9 @@
    */
   public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
     ensureOpen();
-    boolean doFlush = docWriter.bufferDeleteQuery(query);
-    if (doFlush)
-      flush(true, false, false);
+    DocumentsWriter docWriter = getDocWriterForThread();
+    docWriter.bufferDeleteQuery(query);
+    flushIfRAMExceeded();
   }
 
   /**
@@ -2143,9 +2140,9 @@
    */
   public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
     ensureOpen();
-    boolean doFlush = docWriter.bufferDeleteQueries(queries);
-    if (doFlush)
-      flush(true, false, false);
+    DocumentsWriter docWriter = getDocWriterForThread();
+    docWriter.bufferDeleteQueries(queries);
+    flushIfRAMExceeded();
   }
 
   /**
@@ -2192,10 +2189,10 @@
       throws CorruptIndexException, IOException {
     ensureOpen();
     try {
-      boolean doFlush = false;
       boolean success = false;
+      DocumentsWriter docWriter = getDocWriterForThread();
       try {
-        doFlush = docWriter.updateDocument(term, doc, analyzer);
+        docWriter.updateDocument(term, doc, analyzer);
         success = true;
       } finally {
         if (!success) {
@@ -2212,8 +2209,7 @@
           }
         }
       }
-      if (doFlush)
-        flush(true, false, false);
+      flushIfRAMExceeded();
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "updateDocument");
     }
@@ -2225,7 +2221,8 @@
   }
 
   // for test purpose
-  final synchronized int getNumBufferedDocuments(){
+  final synchronized int getNumBufferedDocuments() throws IOException {
+    DocumentsWriter docWriter = getDocWriterForThread();
     return docWriter.getNumDocsInRAM();
   }
 
@@ -2264,7 +2261,7 @@
 
   /** If non-null, information about merges will be printed to this.
    */
-  private PrintStream infoStream = null;
+  PrintStream infoStream = null;
   private static PrintStream defaultInfoStream = null;
 
   /**
@@ -2652,7 +2649,9 @@
       return null;
     }
   }
-
+  
+  
+  
   /*
    * Begin a transaction.  During a transaction, any segment
    * merges that happen (or ram segments flushed) will not
@@ -2672,12 +2671,14 @@
     try {
       if (infoStream != null)
         message("now start transaction");
+      
+      int numBufDeleteTerms = getNumBufferedDeleteTerms();
+      assert numBufDeleteTerms == 0 :
+      "calling startTransaction with buffered delete terms not supported: numBufferedDeleteTerms=" + numBufDeleteTerms;
+      int numBufDocs = getNumBufferedDocuments();
+      assert numBufDocs == 0 :
+      "calling startTransaction with buffered documents not supported: numDocsInRAM=" + numBufDocs;
 
-      assert docWriter.getNumBufferedDeleteTerms() == 0 :
-      "calling startTransaction with buffered delete terms not supported: numBufferedDeleteTerms=" + docWriter.getNumBufferedDeleteTerms();
-      assert docWriter.getNumDocsInRAM() == 0 :
-      "calling startTransaction with buffered documents not supported: numDocsInRAM=" + docWriter.getNumDocsInRAM();
-
       ensureOpen();
 
       // If a transaction is trying to roll back (because
@@ -2706,9 +2707,11 @@
       localRollbackSegmentInfos = (SegmentInfos) segmentInfos.clone();
 
       assert !hasExternalSegments();
+      
+      for (DocumentsWriter docWriter : getDocWriters()) {
+        docWriter.saveFlushedDocCount();
+      }
 
-      localFlushedDocCount = docWriter.getFlushedDocCount();
-
       // We must "protect" our files at this point from
       // deletion in case we need to rollback:
       deleter.incRef(segmentInfos, false);
@@ -2729,8 +2732,8 @@
     if (infoStream != null)
       message("now rollback transaction");
 
-    if (docWriter != null) {
-      docWriter.setFlushedDocCount(localFlushedDocCount);
+    for (DocumentsWriter docWriter : getDocWriters()) {
+      docWriter.restoreFlushedDocCount();
     }
 
     // Must finish merges before rolling back segmentInfos
@@ -2816,9 +2819,11 @@
   private void rollbackInternal() throws IOException {
 
     boolean success = false;
+    
+    for (DocumentsWriter docWriter : docWriters.values()) {
+      docWriter.pauseAllThreads();
+    }
 
-    docWriter.pauseAllThreads();
-
     try {
       finishMerges(false);
 
@@ -2847,7 +2852,9 @@
 
         assert !hasExternalSegments();
         
-        docWriter.abort();
+        for (DocumentsWriter docWriter : docWriters.values()) {
+          docWriter.abort();
+        }
 
         assert testPoint("rollback before checkpoint");
 
@@ -2868,7 +2875,9 @@
     } finally {
       synchronized(this) {
         if (!success) {
-          docWriter.resumeAllThreads();
+          for (DocumentsWriter docWriter : docWriters.values()) {
+            docWriter.resumeAllThreads();
+          }
           closing = false;
           notifyAll();
           if (infoStream != null)
@@ -2879,7 +2888,11 @@
 
     closeInternal(false);
   }
-
+  
+  void updateFlushedDocCount(int num) {
+    flushedDocCount += num;
+  }
+  
   /**
    * Delete all documents in the index.
    *
@@ -2896,15 +2909,21 @@
    *    will receive {@link MergePolicy.MergeAbortedException}s.
    */
   public synchronized void deleteAll() throws IOException {
-    docWriter.pauseAllThreads();
+    for (DocumentsWriter docWriter : docWriters.values()) {
+      docWriter.pauseAllThreads();
+    }
     try {
 
       // Abort any running merges
       finishMerges(false);
 
       // Remove any buffered docs
-      docWriter.abort();
-      docWriter.setFlushedDocCount(0);
+      for (DocumentsWriter docWriter : docWriters.values()) {
+        docWriter.abort();
+      }
+      for (DocumentsWriter docWriter : docWriters.values()) {
+        docWriter.setFlushedDocCount(0);
+      }
 
       // Remove all segments
       segmentInfos.clear();
@@ -2921,7 +2940,9 @@
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "deleteAll");
     } finally {
-      docWriter.resumeAllThreads();
+      for (DocumentsWriter docWriter : docWriters.values()) {
+        docWriter.resumeAllThreads();
+      }
       if (infoStream != null) {
         message("hit exception during deleteAll");
       }
@@ -3111,7 +3132,9 @@
     noDupDirs(dirs);
 
     // Do not allow add docs or deletes while we are running:
-    docWriter.pauseAllThreads();
+    for (DocumentsWriter docWriter : docWriters.values()) {
+      docWriter.pauseAllThreads();
+    }
 
     try {
       if (infoStream != null)
@@ -3146,7 +3169,7 @@
         }
 
         // Notify DocumentsWriter that the flushed count just increased
-        docWriter.updateFlushedDocCount(docCount);
+        //docWriter.updateFlushedDocCount(docCount);
 
         maybeMerge();
 
@@ -3173,8 +3196,10 @@
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "addIndexesNoOptimize");
     } finally {
-      if (docWriter != null) {
-        docWriter.resumeAllThreads();
+      synchronized (this) {
+        for (DocumentsWriter docWriter : docWriters.values()) {
+          docWriter.resumeAllThreads();        
+        }
       }
     }
   }
@@ -3284,7 +3309,11 @@
     ensureOpen();
 
     // Do not allow add docs or deletes while we are running:
-    docWriter.pauseAllThreads();
+    synchronized (this) {
+      for (DocumentsWriter docWriter : docWriters.values()) {
+        docWriter.pauseAllThreads();
+      }
+    }
 
     // We must pre-acquire a read lock here (and upgrade to
     // write lock in startTransaction below) so that no
@@ -3350,7 +3379,7 @@
           }
 
           // Notify DocumentsWriter that the flushed count just increased
-          docWriter.updateFlushedDocCount(docCount);
+          //docWriter.updateFlushedDocCount(docCount);
 
           success = true;
 
@@ -3416,8 +3445,10 @@
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "addIndexes(IndexReader...)");
     } finally {
-      if (docWriter != null) {
-        docWriter.resumeAllThreads();
+      synchronized (this) {
+        for (DocumentsWriter docWriter : getDocWriters()) {
+          docWriter.resumeAllThreads();
+        }
       }
     }
   }
@@ -3613,16 +3644,26 @@
   protected final void flush(boolean triggerMerge, boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
     // We can be called during close, when closing==true, so we must pass false to ensureOpen:
     ensureOpen(false);
-    if (doFlush(flushDocStores, flushDeletes) && triggerMerge)
+    DocumentsWriter docWriter = getDocWriterForThread();
+    if (doFlush(flushDocStores, flushDeletes, docWriter) && triggerMerge)
       maybeMerge();
   }
 
+  protected final void flushAll(boolean triggerMerge, boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
+    // We can be called during close, when closing==true, so we must pass false to ensureOpen:
+    ensureOpen(false);
+    for (DocumentsWriter docWriter : getDocWriters()) {
+      if (doFlush(flushDocStores, flushDeletes, docWriter) && triggerMerge)
+        maybeMerge();
+    }
+  }
+  
   // TODO: this method should not have to be entirely
   // synchronized, ie, merges should be allowed to commit
   // even while a flush is happening
-  private synchronized final boolean doFlush(boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
+  private synchronized final boolean doFlush(boolean flushDocStores, boolean flushDeletes, DocumentsWriter docWriter) throws CorruptIndexException, IOException {
     try {
-      return doFlushInternal(flushDocStores, flushDeletes);
+      return doFlushInternal(flushDocStores, flushDeletes, docWriter);
     } finally {
       docWriter.clearFlushPending();
     }
@@ -3631,7 +3672,7 @@
   // TODO: this method should not have to be entirely
   // synchronized, ie, merges should be allowed to commit
   // even while a flush is happening
-  private synchronized final boolean doFlushInternal(boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
+  private final boolean doFlushInternal(boolean flushDocStores, boolean flushDeletes, DocumentsWriter docWriter) throws CorruptIndexException, IOException {
 
     if (hitOOM) {
       throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush");
@@ -3690,18 +3731,6 @@
         message("  index before flush " + segString());
       }
 
-      // Check if the doc stores must be separately flushed
-      // because other segments, besides the one we are about
-      // to flush, reference it
-      if (flushDocStores && (!flushDocs || !docWriter.getSegment().equals(docWriter.getDocStoreSegment()))) {
-        // We must separately flush the doc store
-        if (infoStream != null)
-          message("  flush shared docStore segment " + docStoreSegment);
-      
-        docStoreIsCompoundFile = flushDocStores();
-        flushDocStores = false;
-      }
-
       String segment = docWriter.getSegment();
 
       // If we are flushing docs, segment must not be null:
@@ -3748,11 +3777,6 @@
 
       docWriter.pushDeletes();
 
-      if (flushDocs) {
-        segmentInfos.add(newSegment);
-        checkpoint();
-      }
-
       if (flushDocs && mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
         // Now build compound file
         boolean success = false;
@@ -3774,12 +3798,18 @@
       if (flushDeletes) {
         applyDeletes();
       }
-      
-      if (flushDocs)
+      if (flushDocs) {
+        synchronized (this) {
+          segmentInfos.add(newSegment);
+        }
         checkpoint();
+      }
 
       doAfterFlush();
-
+      
+      // now throw away this documents writer
+      docWriter.close();
+      docWriters.values().remove(docWriter);
       return flushDocs;
 
     } catch (OutOfMemoryError oom) {
@@ -3796,14 +3826,22 @@
    */
   public final long ramSizeInBytes() {
     ensureOpen();
-    return docWriter.getRAMUsed();
+    long total = 0;
+    for (DocumentsWriter docWriter : docWriters.values()) {
+      total += docWriter.getRAMUsed();
+    }
+    return total;
   }
 
   /** Expert:  Return the number of documents currently
    *  buffered in RAM. */
   public final synchronized int numRamDocs() {
     ensureOpen();
-    return docWriter.getNumDocsInRAM();
+    int total = 0;
+    for (DocumentsWriter docWriter : docWriters.values()) {
+      total += docWriter.getNumDocsInRAM();
+    }
+    return total;
   }
 
   private int ensureContiguousMerge(MergePolicy.OneMerge merge) {
@@ -3936,7 +3974,9 @@
     final int start = ensureContiguousMerge(merge);
 
     commitMergedDeletes(merge, mergedReader);
-    docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);
+    //for (DocumentsWriter docWriter : getDocWriters()) {
+    //  docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);
+    //}
       
     // Simple optimization: if the doc store we are using
     // has been closed and is in now compound format (but
@@ -4161,94 +4201,15 @@
     // merging the doc stores (stored field & vectors).
     // This is a very substantial optimization (saves tons
     // of IO).
+    
+    SegmentInfo si = sourceSegments.info(0);
+    final int docStoreOffset = si.getDocStoreOffset();
+    final String docStoreSegment = si.getDocStoreSegment();
+    final boolean docStoreIsCompoundFile = si.getDocStoreIsCompoundFile();
 
-    Directory lastDir = directory;
-    String lastDocStoreSegment = null;
-    int next = -1;
-
-    boolean mergeDocStores = false;
-    boolean doFlushDocStore = false;
-    final String currentDocStoreSegment = docWriter.getDocStoreSegment();
-
-    // Test each segment to be merged: check if we need to
-    // flush/merge doc stores
-    for (int i = 0; i < end; i++) {
-      SegmentInfo si = sourceSegments.info(i);
-
-      // If it has deletions we must merge the doc stores
-      if (si.hasDeletions())
-        mergeDocStores = true;
-
-      // If it has its own (private) doc stores we must
-      // merge the doc stores
-      if (-1 == si.getDocStoreOffset())
-        mergeDocStores = true;
-
-      // If it has a different doc store segment than
-      // previous segments, we must merge the doc stores
-      String docStoreSegment = si.getDocStoreSegment();
-      if (docStoreSegment == null)
-        mergeDocStores = true;
-      else if (lastDocStoreSegment == null)
-        lastDocStoreSegment = docStoreSegment;
-      else if (!lastDocStoreSegment.equals(docStoreSegment))
-        mergeDocStores = true;
-
-      // Segments' docScoreOffsets must be in-order,
-      // contiguous.  For the default merge policy now
-      // this will always be the case but for an arbitrary
-      // merge policy this may not be the case
-      if (-1 == next)
-        next = si.getDocStoreOffset() + si.docCount;
-      else if (next != si.getDocStoreOffset())
-        mergeDocStores = true;
-      else
-        next = si.getDocStoreOffset() + si.docCount;
-      
-      // If the segment comes from a different directory
-      // we must merge
-      if (lastDir != si.dir)
-        mergeDocStores = true;
-
-      // If the segment is referencing the current "live"
-      // doc store outputs then we must merge
-      if (si.getDocStoreOffset() != -1 && currentDocStoreSegment != null && si.getDocStoreSegment().equals(currentDocStoreSegment)) {
-        doFlushDocStore = true;
-      }
-    }
-
-    final int docStoreOffset;
-    final String docStoreSegment;
-    final boolean docStoreIsCompoundFile;
-
-    if (mergeDocStores) {
-      docStoreOffset = -1;
-      docStoreSegment = null;
-      docStoreIsCompoundFile = false;
-    } else {
-      SegmentInfo si = sourceSegments.info(0);        
-      docStoreOffset = si.getDocStoreOffset();
-      docStoreSegment = si.getDocStoreSegment();
-      docStoreIsCompoundFile = si.getDocStoreIsCompoundFile();
-    }
-
-    if (mergeDocStores && doFlushDocStore) {
-      // SegmentMerger intends to merge the doc stores
-      // (stored fields, vectors), and at least one of the
-      // segments to be merged refers to the currently
-      // live doc stores.
-
-      // TODO: if we know we are about to merge away these
-      // newly flushed doc store files then we should not
-      // make compound file out of them...
-      if (infoStream != null)
-        message("now flush at merge");
-      doFlush(true, false);
-    }
-
     merge.increfDone = true;
 
-    merge.mergeDocStores = mergeDocStores;
+    merge.mergeDocStores = true;
 
     // Bind a new segment name here so even with
     // ConcurrentMergePolicy we keep deterministic segment
@@ -4264,7 +4225,7 @@
     Map<String,String> details = new HashMap<String,String>();
     details.put("optimize", Boolean.toString(merge.optimize));
     details.put("mergeFactor", Integer.toString(end));
-    details.put("mergeDocStores", Boolean.toString(mergeDocStores));
+    details.put("mergeDocStores", Boolean.toString(merge.mergeDocStores));
     setDiagnostics(merge.info, "merge", details);
 
     // Also enroll the merged segment into mergingSegments;
@@ -4390,13 +4351,6 @@
       // necessary to merge doc stores, go and open them:
       if (mergeDocStores && !merge.mergeDocStores) {
         merge.mergeDocStores = true;
-        synchronized(this) {
-          if (dss.contains(docWriter.getDocStoreSegment())) {
-            if (infoStream != null)
-              message("now flush at mergeMiddle");
-            doFlush(true, false);
-          }
-        }
 
         for(int i=0;i<numSegments;i++) {
           merge.readersClone[i].openDocStores();
@@ -4543,9 +4497,11 @@
     flushDeletesCount++;
     SegmentInfos rollback = (SegmentInfos) segmentInfos.clone();
     boolean success = false;
-    boolean changed;
+    boolean changed = false;
     try {
-      changed = docWriter.applyDeletes(segmentInfos);
+      for (DocumentsWriter docWriter : getDocWriters()) {
+        changed |= docWriter.applyDeletes(segmentInfos);
+      }
       success = true;
     } finally {
       if (!success) {
@@ -4577,12 +4533,20 @@
 
   // For test purposes.
   final synchronized int getBufferedDeleteTermsSize() {
-    return docWriter.getBufferedDeleteTerms().size();
+    int total = 0;
+    for (DocumentsWriter docWriter : docWriters.values()) {
+      total += docWriter.getBufferedDeleteTerms().size();
+    }
+    return total;
   }
 
   // For test purposes.
   final synchronized int getNumBufferedDeleteTerms() {
-    return docWriter.getNumBufferedDeleteTerms();
+    int total = 0;
+    for (DocumentsWriter docWriter : docWriters.values()) {
+      total += docWriter.getNumBufferedDeleteTerms();
+    }
+    return total;
   }
 
   // utility routines for tests
@@ -4922,10 +4886,17 @@
       // stale
       return false;
     } else {
-      return !docWriter.anyChanges();
+      return !anyChanges();
     }
   }
-
+  
+  boolean anyChanges() {
+    for (DocumentsWriter docWriter : docWriters.values()) {
+      if (docWriter.anyChanges()) return true;
+    }
+    return false;
+  }
+  
   synchronized boolean isClosed() {
     return closed;
   }
Index: lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java	(revision 927524)
+++ lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java	(working copy)
@@ -96,7 +96,7 @@
   private PrintStream infoStream;
   private Directory directory;
   private IndexDeletionPolicy policy;
-  private DocumentsWriter docWriter;
+  IndexWriter writer;
 
   final boolean startingCommitDeleted;
 
@@ -122,10 +122,9 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, DocumentsWriter docWriter)
+  public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, IndexWriter writer)
     throws CorruptIndexException, IOException {
 
-    this.docWriter = docWriter;
     this.infoStream = infoStream;
 
     if (infoStream != null)
@@ -133,6 +132,7 @@
 
     this.policy = policy;
     this.directory = directory;
+    this.writer = writer;
 
     // First pass: walk the files and initialize our ref
     // counts:
@@ -396,8 +396,8 @@
     } else {
 
       final List<String> docWriterFiles;
-      if (docWriter != null) {
-        docWriterFiles = docWriter.openFiles();
+      if (writer != null) {
+        docWriterFiles = writer.getDocWriterFiles();
         if (docWriterFiles != null)
           // We must incRef these files before decRef'ing
           // last files to make sure we don't accidentally
