Index: src/test/org/apache/lucene/index/TestNRT.java
===================================================================
--- src/test/org/apache/lucene/index/TestNRT.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestNRT.java	(revision 0)
@@ -0,0 +1,232 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestNRT extends LuceneTestCase {
+  Random random = new Random();
+  
+  public void testSimple() throws Exception {
+    Directory primaryDir = new MockRAMDirectory();
+    RAMDirectory ramDir = new MockRAMDirectory();
+    
+    IndexWriter ramWriter = new IndexWriter(ramDir, new WhitespaceAnalyzer(), true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    ramWriter.name = "ram";
+    IndexWriter primaryWriter = new IndexWriter(primaryDir, new WhitespaceAnalyzer(), true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    primaryWriter.name = "primary";
+    //primaryWriter.setInfoStream(System.out);
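+    // flush to the primary writer once ram usage exceeds ~600 KB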
+    NRT nrt = new NRT(ramWriter, primaryWriter, 1024*600);
+    
+    for (int i = 0; i < 100; i++) {
+      primaryWriter.addDocument(TestIndexWriterReader.createDocument(i,
+          "primary", 4));
+    }
+    for (int i = 0; i < 100; i++) {
+      nrt.addDocument(TestIndexWriterReader.createDocument(i, "ram", 4));
+    }
+
+    IndexReader reader = nrt.getReader();
+    assertEquals(200, reader.maxDoc());
+    IndexReader ramReader = ramWriter.getReader();
+    assertEquals(100, ramReader.maxDoc());
+    IndexReader primaryReader = primaryWriter.getReader();
+    assertEquals(100, primaryReader.maxDoc());
+    
+    ramReader.close();
+    primaryReader.close();
+    reader.close();
+
+    nrt.flush();
+    // flush() runs in the background; poll until the ram segments move
+    long deadline = System.currentTimeMillis() + 10000;
+    while (ramWriter.getSegmentInfosCopy().size() > 0 && System.currentTimeMillis() < deadline) {
+      Thread.sleep(10);
+    }
+    primaryReader = primaryWriter.getReader();
+    assertEquals(200, primaryReader.maxDoc());
+    ramReader = ramWriter.getReader();
+    assertEquals(0, ramReader.maxDoc());
+
+    ramReader.close();
+    primaryReader.close();
+    
+    nrt.close();
+    
+    primaryWriter.close();
+    ramWriter.close();
+    
+    ramDir.close();
+    primaryDir.close();
+  }
+  
+  /**
+   * This tests synchronization of the primary and ram writers.
+   * 
+   * @throws Exception
+   */
+  public void testRandomThreads() throws Exception {
+    long duration = 1000 * 15;
+    AtomicInteger totalAdded = new AtomicInteger(0);
+
+    Directory primaryDir = new MockRAMDirectory();
+    RAMDirectory ramDir = new MockRAMDirectory();
+    
+    IndexWriter ramWriter = new IndexWriter(ramDir, new WhitespaceAnalyzer(), true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter primaryWriter = new IndexWriter(primaryDir, new WhitespaceAnalyzer(), true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    //primaryWriter.setInfoStream(System.out);
+    NRT nrt = new NRT(ramWriter, primaryWriter, 1024*600);
+
+    GetReadersThread[] grts = new GetReadersThread[3];
+    for (int x = 0; x < grts.length; x++) {
+      grts[x] = new GetReadersThread("getreadersthread" + x, nrt);
+    }
+    AddDocsThread[] adts = new AddDocsThread[3];
+    for (int x = 0; x < adts.length; x++) {
+      adts[x] = new AddDocsThread("adddocsthread " + x, nrt, totalAdded);
+    }
+    for (int x = 0; x < adts.length; x++) {
+      adts[x].start();
+    }
+    for (int x = 0; x < grts.length; x++) {
+      grts[x].start();
+    }
+    long startTime = System.currentTimeMillis();
+    while ((System.currentTimeMillis() - startTime) < duration) {
+      Thread.sleep(100); // don't busy-spin while the worker threads run
+    }
+    System.out.println(duration + " ms elapsed");
+    for (int x = 0; x < adts.length; x++) {
+      adts[x].dorun = false;
+    }
+    for (int x = 0; x < grts.length; x++) {
+      grts[x].dorun = false;
+    }
+    for (int x = 0; x < adts.length; x++) {
+      adts[x].waitForThread();
+      for (Throwable th : adts[x].errors) {
+        th.printStackTrace();
+      }
+      assertTrue("add docs thread " + x + " hit errors", adts[x].errors.isEmpty());
+    }
+    for (int x = 0; x < grts.length; x++) {
+      grts[x].waitForThread();
+      assertTrue("get readers thread " + x + " hit errors", grts[x].errors.isEmpty());
+    }
+    System.out.println("Completed...");
+    IndexReader reader = nrt.getReader();
+    assertEquals(totalAdded.get(), reader.maxDoc());
+    reader.close();
+    
+    primaryWriter.close();
+    ramWriter.close();
+    
+    ramDir.close();
+    primaryDir.close();
+    
+    System.out.println("Closed...");
+  }
+
+  public class GenericThread extends Thread {
+    volatile boolean dorun = true;
+
+    List<Throwable> errors = new ArrayList<Throwable>();
+
+    NRT ramnrt;
+
+    public GenericThread(String threadName, NRT ramnrt) {
+      this.ramnrt = ramnrt;
+      setName(threadName);
+    }
+    
+    public void waitForThread() throws InterruptedException {
+      System.out.println("waiting for "+getName());
+      join();
+    }
+  }
+
+  /**
+   * Repeatedly opens and closes readers to exercise the NRT reader path.
+   */
+  public class GetReadersThread extends GenericThread {
+    List<String> failures = new ArrayList<String>();
+
+    public GetReadersThread(String threadName, NRT ramnrt) {
+      super(threadName, ramnrt);
+    }
+
+    public void run() {
+      try {
+        while (dorun) {
+          int num = random.nextInt(100);
+          for (int x=0; x < num; x++) {
+            if (!dorun) return;
+            IndexReader reader = ramnrt.getReader();
+            Thread.sleep(random.nextInt(1000));
+            reader.close();
+          }
+        }
+      } catch (Throwable ex) {
+        // the first error ends this thread; the test asserts errors is empty
+        ex.printStackTrace();
+        errors.add(ex);
+      }
+    }
+  }
+
+  public class AddDocsThread extends GenericThread {
+    AtomicInteger totalAdded;
+
+    public AddDocsThread(String threadName, NRT ramnrt,
+        AtomicInteger totalAdded) {
+      super(threadName, ramnrt);
+      this.totalAdded = totalAdded;
+    }
+
+    public void run() {
+      try {
+        while (dorun) {
+          int numdocs = random.nextInt(1000);
+          for (int x = 0; x < numdocs; x++) {
+            if (!dorun)
+              return;
+            ramnrt.addDocument(TestIndexWriterReader.createDocument(x, "", 3));
+            totalAdded.incrementAndGet();
+          }
+          //System.out.println("added " + numdocs + " docs");
+          ramnrt.flush();
+          System.out.println("added " + numdocs + " docs after flush");
+        }
+      } catch (Throwable th) {
+        th.printStackTrace();
+        errors.add(th);
+      }
+    }
+  }
+}
Index: src/test/org/apache/lucene/index/TestNRTReaderWithThreads2.java
===================================================================
--- src/test/org/apache/lucene/index/TestNRTReaderWithThreads2.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestNRTReaderWithThreads2.java	(revision 0)
@@ -0,0 +1,125 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestNRTReaderWithThreads2 extends LuceneTestCase {
+  Random random = new Random();
+  AtomicInteger seq = new AtomicInteger(1);
+
+  public void testIndexing() throws Exception {
+    Directory primaryDir = new MockRAMDirectory();
+    RAMDirectory ramDir = new MockRAMDirectory();
+    
+    IndexWriter ramWriter = new IndexWriter(ramDir, new WhitespaceAnalyzer(), true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    ramWriter.name = "ram";
+    ramWriter.setUseCompoundFile(false);
+    IndexWriter primaryWriter = new IndexWriter(primaryDir, new WhitespaceAnalyzer(), true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    primaryWriter.name = "primary";
+    //primaryWriter.setInfoStream(System.out);
+    NRT nrt = new NRT(ramWriter, primaryWriter, 1024*1024);
+    
+    IndexReader reader = ramWriter.getReader(); // start pooling readers
+    reader.close();
+    RunThread[] indexThreads = new RunThread[4];
+    for (int x=0; x < indexThreads.length; x++) {
+      indexThreads[x] = new RunThread(x % 2, nrt);
+      indexThreads[x].setName("Thread " + x);
+      indexThreads[x].start();
+    }    
+    long startTime = System.currentTimeMillis();
+    long duration = 60*1000;
+    while ((System.currentTimeMillis() - startTime) < duration) {
+      Thread.sleep(100);
+    }
+    int delCount = 0;
+    int addCount = 0;
+    for (int x=0; x < indexThreads.length; x++) {
+      indexThreads[x].run = false;
+    }
+    // join first, then read the per-thread state; reading before the
+    // join races with each thread's final iteration
+    for (int x=0; x < indexThreads.length; x++) {
+      indexThreads[x].join();
+      assertNull(indexThreads[x].ex);
+      addCount += indexThreads[x].addCount;
+      delCount += indexThreads[x].delCount;
+    }
+    System.out.println("addCount=" + addCount + " delCount=" + delCount);
+    System.err.println("primaryWriter.close");
+    nrt.close();
+    primaryWriter.close();
+    System.err.println("primaryWriter.close done");
+    System.err.println("ramWriter.close");
+    ramWriter.close();
+    System.err.println("ramWriter.close done");
+    
+    ramDir.close();
+    primaryDir.close();
+  }
+
+  public class RunThread extends Thread {
+    NRT nrt;
+    volatile boolean run = true;
+    Throwable ex;
+    int delCount = 0;
+    int addCount = 0;
+    int type;
+
+    public RunThread(int type, NRT nrt) {
+      this.type = type;
+      this.nrt = nrt;
+    }
+
+    public void run() {
+      try {
+        while (run) {
+          //int n = random.nextInt(2);
+          if (type == 0) {
+            int i = seq.addAndGet(1);
+            Document doc = TestIndexWriterReader.createDocument(i, "index1", 10);
+            nrt.addDocument(doc);
+            addCount++;
+          } else if (type == 1) {
+            // the term may not exist yet, so this may or may not delete
+            // anything; meanwhile we open and close the reader rapidly
+            IndexReader reader = nrt.getReader();
+            int id = random.nextInt(seq.intValue());
+            Term term = new Term("id", Integer.toString(id));
+            int count = TestIndexWriterReader.count(term, reader);
+            nrt.deleteDocuments(term);
+            reader.close();
+            delCount += count;
+          }
+        }
+      } catch (Throwable ex) {
+        ex.printStackTrace(System.out);
+        this.ex = ex;
+        run = false;
+      }
+    }
+  }
+}
Index: src/java/org/apache/lucene/index/MergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/MergePolicy.java	(revision 833130)
+++ src/java/org/apache/lucene/index/MergePolicy.java	(working copy)
@@ -85,10 +85,12 @@
     final boolean useCompoundFile;
     boolean aborted;
     Throwable error;
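+    // the writer that owns this merge's source segments; with NRT this
+    // may differ from the writer executing the merge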
+    IndexWriter writer;
 
-    public OneMerge(SegmentInfos segments, boolean useCompoundFile) {
+    public OneMerge(IndexWriter writer, SegmentInfos segments, boolean useCompoundFile) {
       if (0 == segments.size())
         throw new RuntimeException("segments must include at least one segment");
+      this.writer = writer;
       this.segments = segments;
       this.useCompoundFile = useCompoundFile;
     }
Index: src/java/org/apache/lucene/index/LogMergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/LogMergePolicy.java	(revision 833130)
+++ src/java/org/apache/lucene/index/LogMergePolicy.java	(working copy)
@@ -242,7 +242,7 @@
         // First, enroll all "full" merges (size
         // mergeFactor) to potentially be run concurrently:
         while (last - maxNumSegments + 1 >= mergeFactor) {
-          spec.add(new OneMerge(infos.range(last-mergeFactor, last), useCompoundFile));
+          spec.add(new OneMerge(writer, infos.range(last-mergeFactor, last), useCompoundFile));
           last -= mergeFactor;
         }
 
@@ -254,7 +254,7 @@
             // Since we must optimize down to 1 segment, the
             // choice is simple:
             if (last > 1 || !isOptimized(infos.info(0)))
-              spec.add(new OneMerge(infos.range(0, last), useCompoundFile));
+              spec.add(new OneMerge(writer, infos.range(0, last), useCompoundFile));
           } else if (last > maxNumSegments) {
 
             // Take care to pick a partial merge that is
@@ -282,7 +282,7 @@
               }
             }
 
-            spec.add(new OneMerge(infos.range(bestStart, bestStart+finalMergeSize), useCompoundFile));
+            spec.add(new OneMerge(writer, infos.range(bestStart, bestStart+finalMergeSize), useCompoundFile));
           }
         }
         
@@ -322,7 +322,7 @@
           // deletions, so force a merge now:
           if (verbose())
             message("  add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
-          spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
+          spec.add(new OneMerge(writer, segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
           firstSegmentWithDeletions = i;
         }
       } else if (firstSegmentWithDeletions != -1) {
@@ -331,7 +331,7 @@
         // mergeFactor segments
         if (verbose())
           message("  add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
-        spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
+        spec.add(new OneMerge(writer, segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
         firstSegmentWithDeletions = -1;
       }
     }
@@ -339,7 +339,7 @@
     if (firstSegmentWithDeletions != -1) {
       if (verbose())
         message("  add merge " + firstSegmentWithDeletions + " to " + (numSegments-1) + " inclusive");
-      spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, numSegments), useCompoundFile));
+      spec.add(new OneMerge(writer, segmentInfos.range(firstSegmentWithDeletions, numSegments), useCompoundFile));
     }
 
     return spec;
@@ -439,7 +439,7 @@
             spec = new MergeSpecification();
           if (verbose())
             message("    " + start + " to " + end + ": add this merge");
-          spec.add(new OneMerge(infos.range(start, end), useCompoundFile));
+          spec.add(new OneMerge(writer, infos.range(start, end), useCompoundFile));
         } else if (verbose())
           message("    " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
 
Index: src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
===================================================================
--- src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java	(revision 833130)
+++ src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java	(working copy)
@@ -99,7 +99,7 @@
   
   private void message(String message) {
     if (verbose())
-      writer.message("CMS: " + message);
+      writer.message((writer.name != null ? writer.name + " " : "") + "CMS: " + message);
   }
 
   private synchronized void initMergeThreadPriority() {
@@ -241,7 +241,7 @@
     final MergeThread thread = new MergeThread(writer, merge);
     thread.setThreadPriority(mergeThreadPriority);
     thread.setDaemon(true);
-    thread.setName("Lucene Merge Thread #" + mergeThreadCount++);
+    thread.setName((writer.name != null ? writer.name + " " : "") + "Lucene Merge Thread #" + mergeThreadCount++);
     return thread;
   }
 
Index: src/java/org/apache/lucene/index/NRT.java
===================================================================
--- src/java/org/apache/lucene/index/NRT.java	(revision 0)
+++ src/java/org/apache/lucene/index/NRT.java	(revision 0)
@@ -0,0 +1,324 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.store.RAMDirectory;
+
+/**
+ * Provides near realtime search capability using a 
+ * RAM writer and a primary (disk) writer.  New documents are
+ * added to the RAM based index writer.  When the RAM writer's
+ * RAM usage exceeds a given size, the RAM segments are merged
+ * into a single primary writer segment in a single background thread.
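+ * <p>A minimal usage sketch (mirrors the tests in this patch; the
+ * 1 MB threshold is arbitrary):</p>
+ *
+ * <pre>
+ * IndexWriter ramWriter = new IndexWriter(new RAMDirectory(), analyzer,
+ *     true, IndexWriter.MaxFieldLength.LIMITED);
+ * IndexWriter primaryWriter = new IndexWriter(dir, analyzer,
+ *     true, IndexWriter.MaxFieldLength.LIMITED);
+ * NRT nrt = new NRT(ramWriter, primaryWriter, 1024 * 1024);
+ * nrt.addDocument(doc);            // buffered in the ram index
+ * IndexReader r = nrt.getReader(); // sees ram + primary segments
+ * r.close();
+ * nrt.close();                     // waits for any background flush
+ * </pre>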
+ */
+public class NRT {
+  private IndexWriter ramWriter;
+
+  private IndexWriter primaryWriter;
+
+  private RAMDirectory ramDirectory;
+
+  private long maxRam;
+
+  // for debugging
+  Set<SegmentInfo> ramNrtRemovedInfos = new HashSet<SegmentInfo>();
+  // for debugging
+  static NRT nrt;
+  //for debugging
+  List<SegmentInfo> ramInfosDeletes = new ArrayList<SegmentInfo>();
+  //for debugging
+  List<StackTraceElement[]> ramInfosDeletesStacktraces = new ArrayList<StackTraceElement[]>();
+  
+  protected ExecutorService threadPool;
+
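+  // true while a FlushTask is executing; flush() will not submit a
+  // second task while one is already running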
+  private AtomicBoolean flushRunning = new AtomicBoolean(false);
+
+  private volatile Throwable flushException; // set by FlushTask, read by flush()
+
+  public NRT(IndexWriter ramWriter, IndexWriter primaryWriter, long maxRam) {
+    this.ramWriter = ramWriter;
+    nrt = this;
+    ramDirectory = (RAMDirectory) ramWriter.getDirectory();
+    this.primaryWriter = primaryWriter;
+    this.maxRam = maxRam;
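+    // a single worker thread serializes all background flushes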
+    threadPool = Executors.newFixedThreadPool(1);
+  }
+  
+  /**
+   * Shuts down the background thread.
+   * @throws Exception
+   */
+  public void close() throws Exception {
+    threadPool.shutdown();
+    while (!threadPool.awaitTermination(9999, TimeUnit.MILLISECONDS)) {
+      Thread.sleep(50);
+    }
+    assert !flushRunning.get();
+  }
+  
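+  /**
+   * Runs {@link #doFlush} on the background thread; any failure is
+   * stashed in flushException and rethrown by the next flush() call.
+   */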
+  public class FlushTask implements Callable<Object> {
+    public Object call() throws Exception {
+      try {
+        flushRunning.set(true);
+        try {
+          doFlush();
+        } finally {
+          flushRunning.set(false);
+        }
+      } catch (Throwable th) {
+        flushException = th;
+        return null;
+      }
+      return null;
+    }
+  }
+
+  /**
+   * Asynchronously flushes the RAM writer's segments and buffer
+   * to the primary writer on the single background thread.  Returns
+   * immediately; if a previous background flush failed, that failure
+   * is rethrown here.
+   * 
+   * @throws IOException
+   */
+  public void flush() throws IOException {
+    if (flushException != null) {
+      Throwable th = flushException;
+      flushException = null;
+      throw new RuntimeException(th);
+    }
+    if (!flushRunning.get()) {
+      threadPool.submit(new FlushTask());
+    }
+  }
+  
+  /**
+   * Provides the main flush logic: merges the current ram segments
+   * into the primary writer, then removes them from the RAM writer.
+   * @throws IOException
+   */
+  private void doFlush() throws IOException {
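+    // first move any buffered docs down into ram segments, so the
+    // SegmentInfos snapshot below covers everything added so far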
+    ramWriter.flush(false, true, false);
+    SegmentInfos ramInfos = null;
+    synchronized (ramWriter) {
+      ramInfos = ramWriter.getSegmentInfosCopy();
+      for (SegmentInfo ramInfo : ramInfos) {
+        if (ramWriter.mergingSegments.contains(ramInfo)) {
+          // this segment is already in a merge
+          // so just return
+          // TODO: I guess we could loop and wait until we get segments
+          // that have not been scheduled for a merge yet?
+          return;
+        }
+      }
+      // adding ram infos to mergingSegments
+      // keeps them from being merged in ram writer
+      for (SegmentInfo ramInfo : ramInfos) {
+        ramWriter.mergingSegments.add(ramInfo);
+      }
+    }
+    if (ramInfos.size() > 0) {
+      MergePolicy.OneMerge primaryMerge = new MergePolicy.OneMerge(ramWriter,
+          ramInfos, primaryWriter.getUseCompoundFile());
+      // this ram merge doesn't actually occur, it's only for passing
+      // into ramWriter.mergeFinish for convenience
+      MergePolicy.OneMerge ramMerge = new MergePolicy.OneMerge(ramWriter,
+          ramInfos, ramWriter.getUseCompoundFile());
+      ramMerge.registerDone = true;
+
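+      // Transplant the ram segments into the primary writer's
+      // SegmentInfos and register the merge there, so the primary
+      // writer's merge machinery rewrites them as one on-disk segment.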
+      synchronized (primaryWriter) {
+        for (SegmentInfo info : primaryMerge.segments) {
+          primaryWriter.segmentInfos.add(info);
+        }
+        if (primaryWriter.registerMerge(primaryMerge)) {
+          primaryWriter.pendingMerges.remove(primaryMerge);
+          primaryWriter.runningMerges.add(primaryMerge);
+        }
+      }
+      primaryWriter.merge(primaryMerge);
+      // remove the merged infos from the ram writer
+      synchronized (primaryWriter) {
+        synchronized (ramWriter) {
+          ramWriter.mergeFinish(ramMerge);
+          // clear out changes in the readers
+          ramWriter.readerPool.clear(ramInfos);
+          for (SegmentInfo info : ramInfos) {
+            assert !ramWriter.mergingSegments.contains(info);
+            assert !primaryWriter.mergingSegments.contains(info);
+            assert !primaryWriter.segmentInfos.contains(info);
+            ramWriter.segmentInfos.remove(info);
+            ramNrtRemovedInfos.add(info);
+          }
+          // this deletes files used by the ram segments
+          // that have been merged into the primary writer
+          ramWriter.deleter.checkpoint(ramWriter.segmentInfos, false);
+        }
+      }
+    }
+  }
+  
+  /**
+   * Gets readers from the primary and ram writers.
+   * @throws IOException
+   */
+  public IndexReader getReader() throws IOException {
+    return getReader(IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+  }
+  
+  /**
+   * Gets readers from the primary and ram writers.
+   * @throws IOException
+   */
+  public IndexReader getReader(int termInfosIndexDivisor) throws IOException {
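+    // pass isNRT=false so IndexWriter.getReader returns only that
+    // writer's own segments instead of routing back through NRT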
+    IndexReader[] ramReaders = ramWriter
+        .getReader(termInfosIndexDivisor, false).getSequentialSubReaders();
+    IndexReader[] primaryReaders = primaryWriter.getReader(
+        termInfosIndexDivisor, false).getSequentialSubReaders();
+    HashSet<IndexReader> readers = new HashSet<IndexReader>(ramReaders.length
+        + primaryReaders.length);
+    Set<SegmentInfo> infos = new HashSet<SegmentInfo>();
+    for (int x = 0; x < ramReaders.length; x++) {
+      SegmentInfo info = ((SegmentReader) ramReaders[x]).getSegmentInfo();
+      infos.add(info);
+      readers.add(ramReaders[x]);
+    }
+    for (int x = 0; x < primaryReaders.length; x++) {
+      SegmentInfo info = ((SegmentReader) primaryReaders[x]).getSegmentInfo();
+      infos.add(info);
+      readers.add(primaryReaders[x]);
+    }
+    // make sure there are no duplicate segment infos
+    // between the writers
+    assert infos.size() == ramReaders.length + primaryReaders.length;
+    return new MultiReader(readers.toArray(new IndexReader[readers.size()]));
+  }
+  
+  /**
+   * Returns the RAM used by the RAM writer's buffer plus the RAMDirectory.
+   */
+  public long getRAMSize() {
+    return ramWriter.ramSizeInBytes() + ramDirectory.sizeInBytes();
+  }
+  
+  /**
+   * Adds a document to the RAM writer.  
+   * <p>See {@link IndexWriter#addDocument(Document)}.</p>
+   */
+  public void addDocument(Document doc) throws IOException {
+    addDocument(doc, primaryWriter.getAnalyzer());
+  }
+  
+  /**
+   * Adds a document to the RAM writer.  
+   * <p>See {@link IndexWriter#addDocument(Document, Analyzer)}.</p>
+   */
+  public void addDocument(Document doc, Analyzer analyzer) throws IOException {
+    ramWriter.addDocument(doc, analyzer);
+    ifFlush();
+  }
+  
+  /**
+   * Deletes the given terms from the RAM writer and the primary
+   * writer.
+   * <p>See {@link IndexWriter#deleteDocuments(Term)}.</p>
+   */
+  public void deleteDocuments(Term... terms) throws CorruptIndexException,
+      IOException {
+    ramWriter.deleteDocuments(terms);
+    primaryWriter.deleteDocuments(terms);
+    ifFlush();
+  }
+  
+  /**
+   * Deletes documents matching the given query from the RAM writer
+   * and the primary writer.
+   * <p>See {@link IndexWriter#deleteDocuments(Query)}.</p>
+   */
+  public void deleteDocuments(Query query) throws CorruptIndexException,
+      IOException {
+    ramWriter.deleteDocuments(query);
+    primaryWriter.deleteDocuments(query);
+    ifFlush();
+  }
+  
+  /**
+   * Deletes documents matching the given queries from the RAM writer
+   * and the primary writer.
+   * <p>See {@link IndexWriter#deleteDocuments(Query...)}.</p>
+   */
+  public void deleteDocuments(Query... queries) throws CorruptIndexException,
+      IOException {
+    ramWriter.deleteDocuments(queries);
+    primaryWriter.deleteDocuments(queries);
+    ifFlush();
+  }
+  
+  /**
+   * Calls updateDocument on the RAM writer and deletes any documents
+   * matching the term from the primary writer.
+   * <p>See {@link IndexWriter#updateDocument(Term,Document,Analyzer)}.</p>
+   */
+  public void updateDocument(Term term, Document doc, Analyzer analyzer)
+      throws CorruptIndexException, IOException {
+    ramWriter.updateDocument(term, doc, analyzer);
+    primaryWriter.deleteDocuments(term);
+    ifFlush();
+  }
+  
+  /**
+   * Calls updateDocument on the RAM writer and deletes any documents
+   * matching the term from the primary writer.
+   * <p>See {@link IndexWriter#updateDocument(Term,Document)}.</p>
+   */
+  public void updateDocument(Term term, Document doc)
+      throws CorruptIndexException, IOException {
+    ramWriter.updateDocument(term, doc);
+    primaryWriter.deleteDocuments(term);
+    ifFlush();
+  }
+  
+  /**
+   * If the current RAM consumption exceeds the allocated 
+   * size, then ask the background flush thread to start. 
+   * @throws IOException
+   */
+  private void ifFlush() throws IOException {
+    if (getRAMSize() > maxRam) {
+      //long ramSizeBytes = ramWriter.ramSizeInBytes();
+      //long ramDirBytes = ramDirectory.sizeInBytes();
+      // if (infoStream != null)
+      // infoStream.println("ramSize:"+ramSizeBytes+" ramDir:"+ramDirBytes);
+      flush();
+    }
+  }
+}
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 833130)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -944,8 +944,13 @@
 
       // Make sure we never attempt to apply deletes to
       // segment in external dir
-      assert infos.info(i).dir == directory;
-
+      // assert infos.info(i).dir == directory;
+      if (infos.info(i).dir == directory) {
+        // debugging: record deletes applied to ram segments so the NRT
+        // assertions can report where a stray delete came from
+        if (NRT.nrt != null && "ram".equals(writer.name)) {
+          //System.err.println("applyDeletes: "+infos.info(i));
+          NRT.nrt.ramInfosDeletes.add(infos.info(i));
+          NRT.nrt.ramInfosDeletesStacktraces.add(Thread.currentThread().getStackTrace());
+        }
       SegmentReader reader = writer.readerPool.get(infos.info(i), false);
       try {
         any |= applyDeletes(reader, docStart);
@@ -953,6 +958,7 @@
       } finally {
         writer.readerPool.release(reader);
       }
+      }
     }
 
     deletesFlushed.clear();
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 833130)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -17,31 +17,31 @@
  * limitations under the License.
  */
 
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DocumentsWriter.IndexingChain;
+import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.Query;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.util.Constants;
 
-import java.io.IOException;
-import java.io.Closeable;
-import java.io.PrintStream;
-import java.util.List;
-import java.util.Collection;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.Iterator;
-import java.util.Map;
-
 /**
   An <code>IndexWriter</code> creates and maintains an index.
 
@@ -253,10 +253,10 @@
   private SegmentInfos localRollbackSegmentInfos;      // segmentInfos we will fallback to if the commit fails
   private int localFlushedDocCount;               // saved docWriter.getFlushedDocCount during local transaction
 
-  private SegmentInfos segmentInfos = new SegmentInfos();       // the segments
+  SegmentInfos segmentInfos = new SegmentInfos();       // the segments
 
   private DocumentsWriter docWriter;
-  private IndexFileDeleter deleter;
+  IndexFileDeleter deleter;
 
   private Set<SegmentInfo> segmentsToOptimize = new HashSet<SegmentInfo>();           // used by optimize to note those needing optimization
 
@@ -266,15 +266,16 @@
 
   private boolean closed;
   private boolean closing;
+  String name;
 
   // Holds all SegmentInfo instances currently involved in
   // merges
-  private HashSet<SegmentInfo> mergingSegments = new HashSet<SegmentInfo>();
+  HashSet<SegmentInfo> mergingSegments = new HashSet<SegmentInfo>();
 
   private MergePolicy mergePolicy = new LogByteSizeMergePolicy(this);
   private MergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
-  private LinkedList<MergePolicy.OneMerge> pendingMerges = new LinkedList<MergePolicy.OneMerge>();
-  private Set<MergePolicy.OneMerge> runningMerges = new HashSet<MergePolicy.OneMerge>();
+  LinkedList<MergePolicy.OneMerge> pendingMerges = new LinkedList<MergePolicy.OneMerge>();
+  Set<MergePolicy.OneMerge> runningMerges = new HashSet<MergePolicy.OneMerge>();
   private List<MergePolicy.OneMerge> mergeExceptions = new ArrayList<MergePolicy.OneMerge>();
   private long mergeGen;
   private boolean stopMerges;
@@ -288,6 +289,9 @@
   private Thread writeThread;                     // non-null if any thread holds write lock
   final ReaderPool readerPool = new ReaderPool();
   private int upgradeCount;
+  //NRT nrt;
+  //static boolean NRT_ONE = true;
+  //boolean useNRT = NRT_ONE;
   
   // This is a "write once" variable (like the organic dye
   // on a DVD-R that may or may not be heated by a laser and
@@ -360,7 +364,11 @@
   public IndexReader getReader() throws IOException {
     return getReader(IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
   }
-
+  
+  public IndexReader getReader(int termInfosIndexDivisor) throws IOException {
+    return getReader(termInfosIndexDivisor, true);
+  }
+  
   /** Expert: like {@link #getReader}, except you can
    *  specify which termInfosIndexDivisor should be used for
    *  any newly opened readers.
@@ -374,7 +382,10 @@
    *  memory usage, at the expense of higher latency when
    *  loading a TermInfo.  The default value is 1.  Set this
    *  to -1 to skip loading the terms index entirely. */
-  public IndexReader getReader(int termInfosIndexDivisor) throws IOException {
+  public IndexReader getReader(int termInfosIndexDivisor, boolean isNRT) throws IOException {
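+    // isNRT=false is used by NRT.getReader to pull this writer's raw
+    // sub-readers without recursing back into the NRT instance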
+    //if (nrt != null && isNRT) {
+    //  return nrt.getReader(termInfosIndexDivisor);
+    //}
     if (infoStream != null) {
       message("flush at getReader");
     }
@@ -424,6 +435,11 @@
     // used only by asserts
     public synchronized boolean infoIsLive(SegmentInfo info) {
       int idx = segmentInfos.indexOf(info);
+      if (idx == -1) {
+        if (NRT.nrt != null && NRT.nrt.ramNrtRemovedInfos.contains(info)) {
+          System.err.println("infoIsLive failure, info was removed by an NRT flush: "+info);
+        }
+      }
       assert idx != -1;
       assert segmentInfos.get(idx) == info;
       return true;
@@ -466,10 +482,21 @@
 
         // We are the last ref to this reader; since we're
         // not pooling readers, we release it:
+        boolean contains = readerMap.containsKey(sr.getSegmentInfo());
         readerMap.remove(sr.getSegmentInfo());
+        
+        if (sr.hasChanges) {
+          int idx = NRT.nrt == null ? -1 : NRT.nrt.ramInfosDeletes.indexOf(sr.getSegmentInfo());
+          System.err.println(name+" sr has changes: "+sr.getSegmentInfo()+" pooled:"+contains+" idx:"+idx);
+          if (idx != -1) {
+            StackTraceElement[] stes = NRT.nrt.ramInfosDeletesStacktraces.get(idx);
+            for (StackTraceElement ste : stes) {
+              System.out.println(ste.toString());
+            }
+          }
+        }
+        assert !sr.hasChanges; 
+        //assert Thread.holdsLock(IndexWriter.this);
 
-        assert !sr.hasChanges || Thread.holdsLock(IndexWriter.this);
-
         // Drop our ref -- this will commit any pending
         // changes to the dir
         boolean success = false;
@@ -520,6 +547,9 @@
         // near real-time reader is kept open after the
         // IndexWriter instance is closed
         sr.decRef();
+        if (sr.getRefCount() > 0) {
+          System.out.println(name+" "+sr.getSegmentInfo()+" close ref count:"+sr.getRefCount());
+        }
       }
     }
     
@@ -851,10 +881,16 @@
    *  <code>false</code> or if there is any other low-level
    *  IO error
    */
-  public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl)
+  public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl, boolean useNRT)
        throws CorruptIndexException, LockObtainFailedException, IOException {
+    //this.useNRT = false;
     init(d, a, create, null, mfl.getLimit(), null, null);
   }
+  
+  public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl)
+    throws CorruptIndexException, LockObtainFailedException, IOException {
+    init(d, a, create, null, mfl.getLimit(), null, null);
+  }
 
   /**
    * Constructs an IndexWriter for the index in
@@ -1020,6 +1056,13 @@
     analyzer = a;
     setMessageID(defaultInfoStream);
     this.maxFieldLength = maxFieldLength;
+    
+    //if (useNRT) {
+    //  RAMDirectory ramDir = new RAMDirectory();
+    //  IndexWriter ramWriter = new IndexWriter(ramDir, a, true,
+    //      new IndexWriter.MaxFieldLength(maxFieldLength), false);
+      //nrt = new NRT(ramWriter, this, (long)(DEFAULT_RAM_BUFFER_SIZE_MB * 1024 *1024));
+    //}
 
     if (indexingChain == null)
       indexingChain = DocumentsWriter.DefaultIndexingChain;
@@ -1361,6 +1404,9 @@
     if (mb == DISABLE_AUTO_FLUSH && getMaxBufferedDocs() == DISABLE_AUTO_FLUSH)
       throw new IllegalArgumentException(
           "at least one of ramBufferSize and maxBufferedDocs must be enabled");
+    //if (nrt != null) {
+    //  nrt.maxRam = (long)(mb * 1024 * 1024);
+    //}
     docWriter.setRAMBufferSizeMB(mb);
     if (infoStream != null)
       message("setRAMBufferSizeMB " + mb);
@@ -1912,6 +1958,10 @@
    * @throws IOException if there is a low-level IO error
    */
   public void addDocument(Document doc, Analyzer analyzer) throws CorruptIndexException, IOException {
+    //if (nrt != null) {
+    //  nrt.addDocument(doc, analyzer);
+    //  return;
+    //}
     ensureOpen();
     boolean doFlush = false;
     boolean success = false;
@@ -1955,6 +2005,10 @@
    * @throws IOException if there is a low-level IO error
    */
   public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
+    //if (nrt != null) {
+    //  nrt.deleteDocuments(term);
+    //  return;
+    //}
     ensureOpen();
     try {
       boolean doFlush = docWriter.bufferDeleteTerm(term);
@@ -1979,6 +2033,10 @@
    * @throws IOException if there is a low-level IO error
    */
   public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
+    //if (nrt != null) {
+    //  nrt.deleteDocuments(terms);
+    //  return;
+    //}
     ensureOpen();
     try {
       boolean doFlush = docWriter.bufferDeleteTerms(terms);
@@ -2001,6 +2059,10 @@
    * @throws IOException if there is a low-level IO error
    */
   public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
+    //if (nrt != null) {
+    //  nrt.deleteDocuments(query);
+    // return;
+   // }
     ensureOpen();
     boolean doFlush = docWriter.bufferDeleteQuery(query);
     if (doFlush)
@@ -2021,6 +2083,10 @@
    * @throws IOException if there is a low-level IO error
    */
   public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
+    //if (nrt != null) {
+    //  nrt.deleteDocuments(queries);
+    //  return;
+    //}
     ensureOpen();
     boolean doFlush = docWriter.bufferDeleteQueries(queries);
     if (doFlush)
@@ -2069,6 +2135,10 @@
    */
   public void updateDocument(Term term, Document doc, Analyzer analyzer)
       throws CorruptIndexException, IOException {
+    //if (nrt != null) {
+    //  nrt.updateDocument(term, doc, analyzer);
+    //  return;
+    //}
     ensureOpen();
     try {
       boolean doFlush = false;
@@ -2875,7 +2945,10 @@
     while(pendingMerges.size() > 0 || runningMerges.size() > 0) {
       doWait();
     }
-
+    if (mergingSegments.size() > 0) {
+      System.out.println("mergingSegments not empty: " + mergingSegments);
+    }
+    
     // sanity check
     assert 0 == mergingSegments.size();
   }
@@ -3060,7 +3133,15 @@
   private boolean hasExternalSegments() {
     return segmentInfos.hasExternalSegments(directory);
   }
-
+  
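+  /** Returns a shallow copy of the current segmentInfos, taken while
+   *  holding this writer's lock. */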
+  synchronized SegmentInfos getSegmentInfosCopy() {
+    SegmentInfos copy = new SegmentInfos();
+    for (SegmentInfo info : segmentInfos) {
+      copy.add(info);
+    }
+    return copy;
+  }
+  
   /* If any of our segments are using a directory != ours
    * then we have to either copy them over one by one, merge
    * them (if merge policy has chosen to) or wait until
@@ -3089,7 +3170,7 @@
           info = segmentInfos.info(i);
           if (info.dir != directory) {
             done = false;
-            final MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(segmentInfos.range(i, 1+i), mergePolicy instanceof LogMergePolicy && getUseCompoundFile());
+            final MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(this, segmentInfos.range(i, 1+i), mergePolicy instanceof LogMergePolicy && getUseCompoundFile());
 
             // Returns true if no running merge conflicts
             // with this one (and, records this merge as
@@ -3697,141 +3778,143 @@
    *  delete generation for merge.info).  If no deletes were
    *  flushed, no new deletes file is saved. */
   synchronized private void commitMergedDeletes(MergePolicy.OneMerge merge, SegmentReader mergeReader) throws IOException {
+    synchronized (merge.writer) { 
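+      // merge.writer may be the NRT ram writer while 'this' is the
+      // primary writer; hold both locks so neither side can change
+      // the source segments mid-commit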
+      assert testPoint("startCommitMergeDeletes");
 
-    assert testPoint("startCommitMergeDeletes");
+      final SegmentInfos sourceSegments = merge.segments;
 
-    final SegmentInfos sourceSegments = merge.segments;
+      if (infoStream != null)
+        message("commitMergeDeletes " + merge.segString(directory));
 
-    if (infoStream != null)
-      message("commitMergeDeletes " + merge.segString(directory));
+      // Carefully merge deletes that occurred after we
+      // started merging:
+      int docUpto = 0;
+      int delCount = 0;
 
-    // Carefully merge deletes that occurred after we
-    // started merging:
-    int docUpto = 0;
-    int delCount = 0;
+      for(int i=0; i < sourceSegments.size(); i++) {
+        SegmentInfo info = sourceSegments.info(i);
+        int docCount = info.docCount;
+        SegmentReader previousReader = merge.readersClone[i];
+        SegmentReader currentReader = merge.readers[i];
+        if (previousReader.hasDeletions()) {
 
-    for(int i=0; i < sourceSegments.size(); i++) {
-      SegmentInfo info = sourceSegments.info(i);
-      int docCount = info.docCount;
-      SegmentReader previousReader = merge.readersClone[i];
-      SegmentReader currentReader = merge.readers[i];
-      if (previousReader.hasDeletions()) {
+          // There were deletes on this segment when the merge
+          // started.  The merge has collapsed away those
+          // deletes, but, if new deletes were flushed since
+          // the merge started, we must now carefully keep any
+          // newly flushed deletes but mapping them to the new
+          // docIDs.
 
-        // There were deletes on this segment when the merge
-        // started.  The merge has collapsed away those
-        // deletes, but, if new deletes were flushed since
-        // the merge started, we must now carefully keep any
-        // newly flushed deletes but mapping them to the new
-        // docIDs.
-
-        if (currentReader.numDeletedDocs() > previousReader.numDeletedDocs()) {
-          // This means this segment has had new deletes
-          // committed since we started the merge, so we
-          // must merge them:
-          for(int j=0;j<docCount;j++) {
-            if (previousReader.isDeleted(j))
-              assert currentReader.isDeleted(j);
-            else {
-              if (currentReader.isDeleted(j)) {
-                mergeReader.doDelete(docUpto);
-                delCount++;
+          if (currentReader.numDeletedDocs() > previousReader.numDeletedDocs()) {
+            // This means this segment has had new deletes
+            // committed since we started the merge, so we
+            // must merge them:
+            for(int j=0;j<docCount;j++) {
+              if (previousReader.isDeleted(j))
+                assert currentReader.isDeleted(j);
+              else {
+                if (currentReader.isDeleted(j)) {
+                  mergeReader.doDelete(docUpto);
+                  delCount++;
+                }
+                docUpto++;
               }
-              docUpto++;
             }
+          } else {
+            docUpto += docCount - previousReader.numDeletedDocs();
           }
-        } else {
-          docUpto += docCount - previousReader.numDeletedDocs();
-        }
-      } else if (currentReader.hasDeletions()) {
-        // This segment had no deletes before but now it
-        // does:
-        for(int j=0; j<docCount; j++) {
-          if (currentReader.isDeleted(j)) {
-            mergeReader.doDelete(docUpto);
-            delCount++;
+        } else if (currentReader.hasDeletions()) {
+          // This segment had no deletes before but now it
+          // does:
+          for(int j=0; j<docCount; j++) {
+            if (currentReader.isDeleted(j)) {
+              mergeReader.doDelete(docUpto);
+              delCount++;
+            }
+            docUpto++;
           }
-          docUpto++;
-        }
-      } else
-        // No deletes before or after
-        docUpto += info.docCount;
-    }
+        } else
+          // No deletes before or after
+          docUpto += info.docCount;
+      }
 
-    assert mergeReader.numDeletedDocs() == delCount;
+      assert mergeReader.numDeletedDocs() == delCount;
 
-    mergeReader.hasChanges = delCount >= 0;
+      mergeReader.hasChanges = delCount >= 0;
+    }
   }
 
   /* FIXME if we want to support non-contiguous segment merges */
   synchronized private boolean commitMerge(MergePolicy.OneMerge merge, SegmentMerger merger, int mergedDocCount, SegmentReader mergedReader) throws IOException {
+    synchronized (merge.writer) {
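+      // as in commitMergedDeletes: also lock the writer that owns
+      // the source segments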
+      assert testPoint("startCommitMerge");
 
-    assert testPoint("startCommitMerge");
+      if (hitOOM) {
+        throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete merge");
+      }
 
-    if (hitOOM) {
-      throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete merge");
-    }
+      if (infoStream != null)
+        message("commitMerge: " + merge.segString(directory) + " index=" + segString());
 
-    if (infoStream != null)
-      message("commitMerge: " + merge.segString(directory) + " index=" + segString());
+      assert merge.registerDone;
 
-    assert merge.registerDone;
+      // If merge was explicitly aborted, or, if rollback() or
+      // rollbackTransaction() had been called since our merge
+      // started (which results in an unqualified
+      // deleter.refresh() call that will remove any index
+      // file that current segments does not reference), we
+      // abort this merge
+      if (merge.isAborted()) {
+        if (infoStream != null)
+          message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");
 
-    // If merge was explicitly aborted, or, if rollback() or
-    // rollbackTransaction() had been called since our merge
-    // started (which results in an unqualified
-    // deleter.refresh() call that will remove any index
-    // file that current segments does not reference), we
-    // abort this merge
-    if (merge.isAborted()) {
-      if (infoStream != null)
-        message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");
+        deleter.refresh(merge.info.name);
+        return false;
+      }
 
-      deleter.refresh(merge.info.name);
-      return false;
-    }
+      final int start = ensureContiguousMerge(merge);
 
-    final int start = ensureContiguousMerge(merge);
-
-    commitMergedDeletes(merge, mergedReader);
-    docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);
+      commitMergedDeletes(merge, mergedReader);
+      docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);
       
-    // Simple optimization: if the doc store we are using
-    // has been closed and is in now compound format (but
-    // wasn't when we started), then we will switch to the
-    // compound format as well:
-    final String mergeDocStoreSegment = merge.info.getDocStoreSegment(); 
-    if (mergeDocStoreSegment != null && !merge.info.getDocStoreIsCompoundFile()) {
-      final int size = segmentInfos.size();
-      for(int i=0;i<size;i++) {
-        final SegmentInfo info = segmentInfos.info(i);
-        final String docStoreSegment = info.getDocStoreSegment();
-        if (docStoreSegment != null &&
-            docStoreSegment.equals(mergeDocStoreSegment) && 
-            info.getDocStoreIsCompoundFile()) {
-          merge.info.setDocStoreIsCompoundFile(true);
-          break;
+      // Simple optimization: if the doc store we are using
+      // has been closed and is in now compound format (but
+      // wasn't when we started), then we will switch to the
+      // compound format as well:
+      final String mergeDocStoreSegment = merge.info.getDocStoreSegment(); 
+      if (mergeDocStoreSegment != null && !merge.info.getDocStoreIsCompoundFile()) {
+        final int size = segmentInfos.size();
+        for(int i=0;i<size;i++) {
+          final SegmentInfo info = segmentInfos.info(i);
+          final String docStoreSegment = info.getDocStoreSegment();
+          if (docStoreSegment != null &&
+              docStoreSegment.equals(mergeDocStoreSegment) && 
+              info.getDocStoreIsCompoundFile()) {
+            merge.info.setDocStoreIsCompoundFile(true);
+            break;
+          }
         }
       }
-    }
 
-    merge.info.setHasProx(merger.hasProx());
+      merge.info.setHasProx(merger.hasProx());
 
-    segmentInfos.subList(start, start + merge.segments.size()).clear();
-    assert !segmentInfos.contains(merge.info);
-    segmentInfos.add(start, merge.info);
+      segmentInfos.subList(start, start + merge.segments.size()).clear();
+      assert !segmentInfos.contains(merge.info);
+      segmentInfos.add(start, merge.info);
 
-    // Must note the change to segmentInfos so any commits
-    // in-flight don't lose it:
-    checkpoint();
+      // Must note the change to segmentInfos so any commits
+      // in-flight don't lose it:
+      checkpoint();
 
-    // If the merged segments had pending changes, clear
-    // them so that they don't bother writing them to
-    // disk, updating SegmentInfo, etc.:
-    readerPool.clear(merge.segments);
+      // If the merged segments had pending changes, clear
+      // them so that they don't bother writing them to
+      // disk, updating SegmentInfo, etc.:
+      merge.writer.readerPool.clear(merge.segments);
 
-    if (merge.optimize)
-      segmentsToOptimize.add(merge.info);
-    return true;
+      if (merge.optimize)
+        segmentsToOptimize.add(merge.info);
+      return true;
+    }
   }
   
   private synchronized void decrefMergeSegments(MergePolicy.OneMerge merge) throws IOException {
@@ -3897,20 +3980,22 @@
         }
       } finally {
         synchronized(this) {
-          mergeFinish(merge);
+          synchronized (merge.writer) {
+            mergeFinish(merge);
 
-          if (!success) {
-            if (infoStream != null)
-              message("hit exception during merge");
-            if (merge.info != null && !segmentInfos.contains(merge.info))
-              deleter.refresh(merge.info.name);
+            if (!success) {
+              if (infoStream != null)
+                message("hit exception during merge");
+              if (merge.info != null && !segmentInfos.contains(merge.info))
+                deleter.refresh(merge.info.name);
+            }
+
+            // This merge (and, generally, any change to the
+            // segments) may now enable new merges, so we call
+            // merge policy & update pending merges.
+            if (success && !merge.isAborted() && !closed && !closing)
+              updatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize);
           }
-
-          // This merge (and, generally, any change to the
-          // segments) may now enable new merges, so we call
-          // merge policy & update pending merges.
-          if (success && !merge.isAborted() && !closed && !closing)
-            updatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize);
         }
       }
     } catch (OutOfMemoryError oom) {
@@ -4148,26 +4233,27 @@
   /** Does fininishing for a merge, which is fast but holds
    *  the synchronized lock on IndexWriter instance. */
   final synchronized void mergeFinish(MergePolicy.OneMerge merge) throws IOException {
-    
-    // Optimize, addIndexes or finishMerges may be waiting
-    // on merges to finish.
-    notifyAll();
+    synchronized (merge.writer) {
+      // Optimize, addIndexes or finishMerges may be waiting
+      // on merges to finish.
+      notifyAll();
 
-    if (merge.increfDone)
-      decrefMergeSegments(merge);
+      if (merge.increfDone)
+        decrefMergeSegments(merge);
 
-    // It's possible we are called twice, eg if there was an
-    // exception inside mergeInit
-    if (merge.registerDone) {
-      final SegmentInfos sourceSegments = merge.segments;
-      final int end = sourceSegments.size();
-      for(int i=0;i<end;i++)
-        mergingSegments.remove(sourceSegments.info(i));
-      mergingSegments.remove(merge.info);
-      merge.registerDone = false;
+      // It's possible we are called twice, eg if there was an
+      // exception inside mergeInit
+      if (merge.registerDone) {
+        final SegmentInfos sourceSegments = merge.segments;
+        final int end = sourceSegments.size();
+        for(int i=0;i<end;i++)
+          mergingSegments.remove(sourceSegments.info(i));
+        mergingSegments.remove(merge.info);
+        merge.registerDone = false;
+      }
+
+      runningMerges.remove(merge);
     }
-
-    runningMerges.remove(merge);
   }
 
   /** Does the actual (time-consuming) work of the merge,
@@ -4211,7 +4297,7 @@
 
         // Hold onto the "live" reader; we will use this to
         // commit merged deletes
-        SegmentReader reader = merge.readers[i] = readerPool.get(info, merge.mergeDocStores,
+        SegmentReader reader = merge.readers[i] = merge.writer.readerPool.get(info, merge.mergeDocStores,
                                                                  MERGE_READ_BUFFER_SIZE,
                                                                  -1);
 
@@ -4243,10 +4329,12 @@
       if (mergeDocStores && !merge.mergeDocStores) {
         merge.mergeDocStores = true;
         synchronized(this) {
-          if (dss.contains(docWriter.getDocStoreSegment())) {
-            if (infoStream != null)
-              message("now flush at mergeMiddle");
-            doFlush(true, false);
+          synchronized (merge.writer) { 
+            if (dss.contains(docWriter.getDocStoreSegment())) {
+              if (infoStream != null)
+                message("now flush at mergeMiddle");
+              doFlush(true, false);
+            }
           }
         }
 
@@ -4286,38 +4374,40 @@
       success = true;
     } finally {
       synchronized(this) {
-        if (!success) {
-          // Suppress any new exceptions so we throw the
-          // original cause
-          for (int i=0;i<numSegments;i++) {
-            if (merge.readers[i] != null) {
-              try {
-                readerPool.release(merge.readers[i], true);
-              } catch (Throwable t) {
+        synchronized (merge.writer) {
+          if (!success) {
+            // Suppress any new exceptions so we throw the
+            // original cause
+            for (int i=0;i<numSegments;i++) {
+              if (merge.readers[i] != null) {
+                try {
+                  merge.writer.readerPool.release(merge.readers[i], true);
+                } catch (Throwable t) {
+                }
               }
+
+              if (merge.readersClone[i] != null) {
+                try {
+                  merge.readersClone[i].close();
+                } catch (Throwable t) {
+                }
+                // This was a private clone and we had the only reference
+                assert merge.readersClone[i].getRefCount() == 0;
+              }
             }
+          } else {
+            for (int i=0;i<numSegments;i++) {
+              if (merge.readers[i] != null) {
+                merge.writer.readerPool.release(merge.readers[i], true);
+              }
 
-            if (merge.readersClone[i] != null) {
-              try {
+              if (merge.readersClone[i] != null) {
                 merge.readersClone[i].close();
-              } catch (Throwable t) {
+                // This was a private clone and we had the only reference
+                assert merge.readersClone[i].getRefCount() == 0;
               }
-              // This was a private clone and we had the only reference
-              assert merge.readersClone[i].getRefCount() == 0;
             }
           }
-        } else {
-          for (int i=0;i<numSegments;i++) {
-            if (merge.readers[i] != null) {
-              readerPool.release(merge.readers[i], true);
-            }
-
-            if (merge.readersClone[i] != null) {
-              merge.readersClone[i].close();
-              // This was a private clone and we had the only reference
-              assert merge.readersClone[i].getRefCount() == 0;
-            }
-          }
         }
       }
     }
@@ -4516,7 +4606,7 @@
       return true;
     }
   }
-
+  
   private synchronized void doWait() {
     // NOTE: the callers of this method should in theory
     // be able to do simply wait(), but, as a defense
Index: contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java	(revision 833130)
+++ contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java	(working copy)
@@ -160,7 +160,7 @@
           if (last > 1 || !isOptimized(writer, infos.info(0))) {
 
             spec = new MergeSpecification();
-            spec.add(new OneMerge(infos.range(0, last), useCompoundFile));
+            spec.add(new OneMerge(writer, infos.range(0, last), useCompoundFile));
           }
         } else if (last > maxNumSegments) {
 
@@ -218,7 +218,7 @@
       prev = backLink[i][prev];
       int mergeStart = i + prev;
       if((mergeEnd - mergeStart) > 1) {
-        spec.add(new OneMerge(infos.range(mergeStart, mergeEnd), useCompoundFile));
+        spec.add(new OneMerge(writer, infos.range(mergeStart, mergeEnd), useCompoundFile));
       } else {
         if(partialExpunge) {
           SegmentInfo info = infos.info(mergeStart);
@@ -234,7 +234,7 @@
     
     if(partialExpunge && maxDelCount > 0) {
       // expunge deletes
-      spec.add(new OneMerge(infos.range(expungeCandidate, expungeCandidate + 1), useCompoundFile));
+      spec.add(new OneMerge(writer, infos.range(expungeCandidate, expungeCandidate + 1), useCompoundFile));
     }
     
     return spec;
@@ -284,7 +284,7 @@
     for(int i = 0; i < numLargeSegs; i++) {
       SegmentInfo info = infos.info(i);
       if(info.hasDeletions()) {
-        spec.add(new OneMerge(infos.range(i, i + 1), getUseCompoundFile()));        
+        spec.add(new OneMerge(writer, infos.range(i, i + 1), getUseCompoundFile()));        
       }
     }
     return spec;
@@ -322,7 +322,7 @@
       if(totalSmallSegSize < targetSegSize * 2) {
         MergeSpecification spec = findBalancedMerges(infos, numLargeSegs, (numLargeSegs - 1), _partialExpunge);
         if(spec == null) spec = new MergeSpecification(); // should not happen
-        spec.add(new OneMerge(infos.range(numLargeSegs, numSegs), getUseCompoundFile()));
+        spec.add(new OneMerge(writer, infos.range(numLargeSegs, numSegs), getUseCompoundFile()));
         return spec;
       } else {
         return findBalancedMerges(infos, numSegs, numLargeSegs, _partialExpunge);
@@ -337,7 +337,7 @@
         if(size(info) < sizeThreshold) break;
         startSeg++;
       }
-      spec.add(new OneMerge(infos.range(startSeg, numSegs), getUseCompoundFile()));
+      spec.add(new OneMerge(writer, infos.range(startSeg, numSegs), getUseCompoundFile()));
       return spec;
     } else {
       // apply the log merge policy to small segments.
@@ -368,7 +368,7 @@
       }
     }
     if (maxDelCount > 0) {
-      return new OneMerge(infos.range(expungeCandidate, expungeCandidate + 1), getUseCompoundFile());
+      return new OneMerge(writer, infos.range(expungeCandidate, expungeCandidate + 1), getUseCompoundFile());
     }
     return null;
   }
