Index: src/test/org/apache/lucene/index/TestNRT.java
===================================================================
--- src/test/org/apache/lucene/index/TestNRT.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestNRT.java	(revision 0)
@@ -0,0 +1,230 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestNRT extends LuceneTestCase {
+  Random random = new Random();
+  
+  public void testSimple() throws Exception {
+    Directory primaryDir = new MockRAMDirectory();
+    RAMDirectory ramDir = new MockRAMDirectory();
+    
+    IndexWriter ramWriter = new IndexWriter(ramDir, new WhitespaceAnalyzer(), true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    ramWriter.name = "ram";
+    IndexWriter primaryWriter = new IndexWriter(primaryDir, new WhitespaceAnalyzer(), true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    primaryWriter.name = "primary";
+    //primaryWriter.setInfoStream(System.out);
+    NRT nrt = new NRT(ramWriter, primaryWriter, 1024*600);
+    
+    for (int i = 0; i < 100; i++) {
+      primaryWriter.addDocument(TestIndexWriterReader.createDocument(i,
+          "primary", 4));
+    }
+    for (int i = 0; i < 100; i++) {
+      nrt.addDocument(TestIndexWriterReader.createDocument(i, "ram", 4));
+    }
+
+    IndexReader reader = nrt.getReader();
+    assertEquals(200, reader.maxDoc());
+    IndexReader ramReader = ramWriter.getReader();
+    assertEquals(100, ramReader.maxDoc());
+    IndexReader primaryReader = primaryWriter.getReader();
+    assertEquals(100, primaryReader.maxDoc());
+    
+    ramReader.close();
+    primaryReader.close();
+    reader.close();
+
+    nrt.flush();
+    primaryReader = primaryWriter.getReader();
+    assertEquals(200, primaryReader.maxDoc());
+    ramReader = ramWriter.getReader();
+    assertEquals(0, ramReader.maxDoc());
+
+    ramReader.close();
+    primaryReader.close();
+    
+    primaryWriter.close();
+    ramWriter.close();
+    
+    ramDir.close();
+    primaryDir.close();
+  }
+  
+  /**
+   * This tests synchronization of the primary and ram writers.
+   * 
+   * @throws Exception
+   */
+  public void testRandomThreads() throws Exception {
+    long duration = 1000 * 15;
+    AtomicInteger totalAdded = new AtomicInteger(0);
+
+    Directory primaryDir = new MockRAMDirectory();
+    RAMDirectory ramDir = new MockRAMDirectory();
+    
+    IndexWriter ramWriter = new IndexWriter(ramDir, new WhitespaceAnalyzer(), true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter primaryWriter = new IndexWriter(primaryDir, new WhitespaceAnalyzer(), true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    //primaryWriter.setInfoStream(System.out);
+    NRT nrt = new NRT(ramWriter, primaryWriter, 1024*600);
+
+    GetReadersThread[] grts = new GetReadersThread[3];
+    for (int x = 0; x < grts.length; x++) {
+      grts[x] = new GetReadersThread("getreadersthread" + x, nrt);
+    }
+    AddDocsThread[] adts = new AddDocsThread[3];
+    for (int x = 0; x < adts.length; x++) {
+      adts[x] = new AddDocsThread("adddocsthread " + x, nrt, totalAdded);
+    }
+    for (int x = 0; x < adts.length; x++) {
+      adts[x].start();
+    }
+    for (int x = 0; x < grts.length; x++) {
+      grts[x].start();
+    }
+    long startTime = System.currentTimeMillis();
+    while (true) {
+      if ((System.currentTimeMillis() - startTime) > duration) {
+        break;
+      }
+    }
+    System.out.println(duration+" elapsed");
+    for (int x = 0; x < adts.length; x++) {
+      adts[x].dorun = false;
+    }
+    for (int x = 0; x < grts.length; x++) {
+      grts[x].dorun = false;
+    }
+    for (int x = 0; x < adts.length; x++) {
+      adts[x].waitForThread();
+      //System.out.println("add docs errors "+x);
+      //for (Throwable th : adts[x].errors) { 
+      //  th.printStackTrace();
+      //}
+    }
+    for (int x = 0; x < grts.length; x++) {
+      grts[x].waitForThread();
+    }
+    System.out.println("Completed...");
+    IndexReader reader = nrt.getReader();
+    assertEquals(totalAdded.get(), reader.maxDoc());
+    reader.close();
+    
+    primaryWriter.close();
+    ramWriter.close();
+    
+    ramDir.close();
+    primaryDir.close();
+    
+    System.out.println("Closed...");
+  }
+
+  public class GenericThread extends Thread {
+    boolean dorun = true;
+
+    List<Throwable> errors = new ArrayList<Throwable>();
+
+    NRT ramnrt;
+
+    public GenericThread(String threadName, NRT ramnrt) {
+      this.ramnrt = ramnrt;
+      setName(threadName);
+    }
+    
+    public void waitForThread() throws InterruptedException {
+      System.out.println("waiting for "+getName());
+      join();
+    }
+  }
+
+  /**
+   * Check to insure the readers are not identical
+   */
+  public class GetReadersThread extends GenericThread {
+    List<String> failures = new ArrayList<String>();
+
+    public GetReadersThread(String threadName, NRT ramnrt) {
+      super(threadName, ramnrt);
+    }
+
+    public void run() {
+      try {
+        while (dorun) {
+          int num = random.nextInt(100);
+          for (int x=0; x < num; x++) {
+            if (!dorun) return;
+            IndexReader reader = ramnrt.getReader();
+            Thread.sleep(random.nextInt(1000));
+            reader.close();
+          }
+        }
+      } catch (Throwable ex) {
+        ex.printStackTrace();
+        errors.add(ex);
+        if (errors.size() > 5) {
+          throw new RuntimeException(ex);
+        }
+      }
+    }
+  }
+
+  public class AddDocsThread extends GenericThread {
+    AtomicInteger totalAdded;
+
+    public AddDocsThread(String threadName, NRT ramnrt,
+        AtomicInteger totalAdded) {
+      super(threadName, ramnrt);
+      this.totalAdded = totalAdded;
+    }
+
+    public void run() {
+      try {
+        while (dorun) {
+          int numdocs = random.nextInt(1000);
+          for (int x = 0; x < numdocs; x++) {
+            if (!dorun)
+              return;
+            ramnrt.addDocument(TestIndexWriterReader.createDocument(x, "", 3));
+            totalAdded.incrementAndGet();
+          }
+          System.out.println("added " + numdocs + " docs");
+          ramnrt.flush();
+          System.out.println("added " + numdocs + " docs after flush");
+        }
+      } catch (Throwable th) {
+        th.printStackTrace();
+        errors.add(th);
+      }
+    }
+  }
+}
Index: src/test/org/apache/lucene/index/TestNRTReaderWithThreads2.java
===================================================================
--- src/test/org/apache/lucene/index/TestNRTReaderWithThreads2.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestNRTReaderWithThreads2.java	(revision 0)
@@ -0,0 +1,120 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Random;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.TestIndexWriterReader.HeavyAtomicInt;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestNRTReaderWithThreads2 extends LuceneTestCase {
+  Random random = new Random();
+  HeavyAtomicInt seq = new HeavyAtomicInt(1);
+
+  public void testIndexing() throws Exception {
+    Directory primaryDir = new MockRAMDirectory();
+    RAMDirectory ramDir = new MockRAMDirectory();
+    
+    IndexWriter ramWriter = new IndexWriter(ramDir, new WhitespaceAnalyzer(), true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    ramWriter.name = "ram";
+    ramWriter.setUseCompoundFile(false);
+    IndexWriter primaryWriter = new IndexWriter(primaryDir, new WhitespaceAnalyzer(), true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    primaryWriter.name = "primary";
+    //primaryWriter.setInfoStream(System.out);
+    NRT nrt = new NRT(ramWriter, primaryWriter, 1024*1024);
+    
+    IndexReader reader = ramWriter.getReader(); // start pooling readers
+    reader.close();
+    RunThread[] indexThreads = new RunThread[4];
+    for (int x=0; x < indexThreads.length; x++) {
+      indexThreads[x] = new RunThread(x % 2, nrt);
+      indexThreads[x].setName("Thread " + x);
+      indexThreads[x].start();
+    }    
+    long startTime = System.currentTimeMillis();
+    long duration = 60*1000;
+    while ((System.currentTimeMillis() - startTime) < duration) {
+      Thread.sleep(100);
+    }
+    int delCount = 0;
+    int addCount = 0;
+    for (int x=0; x < indexThreads.length; x++) {
+      indexThreads[x].run = false;
+      assertTrue(indexThreads[x].ex == null);
+      addCount += indexThreads[x].addCount;
+      delCount += indexThreads[x].delCount;
+    }
+    for (int x=0; x < indexThreads.length; x++) {
+      indexThreads[x].join();
+    }
+    ramWriter.close();
+    primaryWriter.close();
+    
+    ramDir.close();
+    primaryDir.close();
+  }
+
+  public class RunThread extends Thread {
+    NRT nrt;
+    boolean run = true;
+    Throwable ex;
+    int delCount = 0;
+    int addCount = 0;
+    int type;
+
+    public RunThread(int type, NRT nrt) {
+      this.type = type;
+      this.nrt = nrt;
+    }
+
+    public void run() {
+      try {
+        while (run) {
+          //int n = random.nextInt(2);
+          if (type == 0) {
+            int i = seq.addAndGet(1);
+            Document doc = TestIndexWriterReader.createDocument(i, "index1", 10);
+            nrt.addDocument(doc);
+            addCount++;
+          } else if (type == 1) {
+            // we may or may not delete because the term may not exist,
+            // however we're opening and closing the reader rapidly
+            IndexReader reader = nrt.getReader();
+            int id = random.nextInt(seq.intValue());
+            Term term = new Term("id", Integer.toString(id));
+            int count = TestIndexWriterReader.count(term, reader);
+            nrt.deleteDocuments(term);
+            reader.close();
+            delCount += count;
+          }
+        }
+      } catch (Throwable ex) {
+        ex.printStackTrace(System.out);
+        this.ex = ex;
+        run = false;
+      }
+    }
+  }
+}
Index: src/java/org/apache/lucene/index/MergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/MergePolicy.java	(revision 833130)
+++ src/java/org/apache/lucene/index/MergePolicy.java	(working copy)
@@ -85,10 +85,12 @@
     final boolean useCompoundFile;
     boolean aborted;
     Throwable error;
+    IndexWriter writer;
 
-    public OneMerge(SegmentInfos segments, boolean useCompoundFile) {
+    public OneMerge(IndexWriter writer, SegmentInfos segments, boolean useCompoundFile) {
       if (0 == segments.size())
         throw new RuntimeException("segments must include at least one segment");
+      this.writer = writer;
       this.segments = segments;
       this.useCompoundFile = useCompoundFile;
     }
Index: src/java/org/apache/lucene/index/LogMergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/LogMergePolicy.java	(revision 833130)
+++ src/java/org/apache/lucene/index/LogMergePolicy.java	(working copy)
@@ -242,7 +242,7 @@
         // First, enroll all "full" merges (size
         // mergeFactor) to potentially be run concurrently:
         while (last - maxNumSegments + 1 >= mergeFactor) {
-          spec.add(new OneMerge(infos.range(last-mergeFactor, last), useCompoundFile));
+          spec.add(new OneMerge(writer, infos.range(last-mergeFactor, last), useCompoundFile));
           last -= mergeFactor;
         }
 
@@ -254,7 +254,7 @@
             // Since we must optimize down to 1 segment, the
             // choice is simple:
             if (last > 1 || !isOptimized(infos.info(0)))
-              spec.add(new OneMerge(infos.range(0, last), useCompoundFile));
+              spec.add(new OneMerge(writer, infos.range(0, last), useCompoundFile));
           } else if (last > maxNumSegments) {
 
             // Take care to pick a partial merge that is
@@ -282,7 +282,7 @@
               }
             }
 
-            spec.add(new OneMerge(infos.range(bestStart, bestStart+finalMergeSize), useCompoundFile));
+            spec.add(new OneMerge(writer, infos.range(bestStart, bestStart+finalMergeSize), useCompoundFile));
           }
         }
         
@@ -322,7 +322,7 @@
           // deletions, so force a merge now:
           if (verbose())
             message("  add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
-          spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
+          spec.add(new OneMerge(writer, segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
           firstSegmentWithDeletions = i;
         }
       } else if (firstSegmentWithDeletions != -1) {
@@ -331,7 +331,7 @@
         // mergeFactor segments
         if (verbose())
           message("  add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
-        spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
+        spec.add(new OneMerge(writer, segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
         firstSegmentWithDeletions = -1;
       }
     }
@@ -339,7 +339,7 @@
     if (firstSegmentWithDeletions != -1) {
       if (verbose())
         message("  add merge " + firstSegmentWithDeletions + " to " + (numSegments-1) + " inclusive");
-      spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, numSegments), useCompoundFile));
+      spec.add(new OneMerge(writer, segmentInfos.range(firstSegmentWithDeletions, numSegments), useCompoundFile));
     }
 
     return spec;
@@ -439,7 +439,7 @@
             spec = new MergeSpecification();
           if (verbose())
             message("    " + start + " to " + end + ": add this merge");
-          spec.add(new OneMerge(infos.range(start, end), useCompoundFile));
+          spec.add(new OneMerge(writer, infos.range(start, end), useCompoundFile));
         } else if (verbose())
           message("    " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
 
Index: src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
===================================================================
--- src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java	(revision 833130)
+++ src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java	(working copy)
@@ -99,7 +99,7 @@
   
   private void message(String message) {
     if (verbose())
-      writer.message("CMS: " + message);
+      writer.message(writer.name+" CMS: " + message);
   }
 
   private synchronized void initMergeThreadPriority() {
@@ -241,7 +241,7 @@
     final MergeThread thread = new MergeThread(writer, merge);
     thread.setThreadPriority(mergeThreadPriority);
     thread.setDaemon(true);
-    thread.setName("Lucene Merge Thread #" + mergeThreadCount++);
+    thread.setName(writer.name+" Lucene Merge Thread #" + mergeThreadCount++);
     return thread;
   }
 
Index: src/java/org/apache/lucene/index/NRT.java
===================================================================
--- src/java/org/apache/lucene/index/NRT.java	(revision 0)
+++ src/java/org/apache/lucene/index/NRT.java	(revision 0)
@@ -0,0 +1,185 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.store.RAMDirectory;
+
/**
 * Near-real-time indexing layer pairing a RAM-resident IndexWriter (fast
 * turnaround for freshly added documents) with a primary, persistent
 * IndexWriter.  New documents go to the RAM writer first; once the combined
 * RAM footprint exceeds {@code maxRam}, the RAM segments are merged into the
 * primary index by {@link #flush()}.  {@link #getReader()} returns a
 * MultiReader spanning both writers' segments.
 *
 * NOTE(review): only flush() is synchronized; addDocument/deleteDocuments/
 * getReader are not — confirm the intended concurrency contract between
 * concurrent adds and an in-progress flush.
 */
public class NRT {
  private IndexWriter ramWriter;

  private IndexWriter primaryWriter;

  // Directory backing ramWriter; used only for size accounting in getRAMSize().
  RAMDirectory ramDirectory;

  // Byte threshold above which ifFlush() triggers a flush to the primary.
  long maxRam;

  /**
   * @param ramWriter writer over a RAMDirectory (cast below will fail
   *        otherwise) that buffers new documents
   * @param primaryWriter the persistent destination index
   * @param maxRam RAM usage threshold, in bytes, that triggers a flush
   */
  public NRT(IndexWriter ramWriter, IndexWriter primaryWriter, long maxRam) {
    this.ramWriter = ramWriter;
    ramDirectory = (RAMDirectory) ramWriter.getDirectory();
    this.primaryWriter = primaryWriter;
    this.maxRam = maxRam;
  }

  /**
   * Flush the RAM segments and buffer to the primary writer: the RAM
   * writer's segments are registered as a single merge on the primary
   * writer, merged into it, then removed from the RAM writer's SegmentInfos.
   * 
   * @throws IOException
   */
  public synchronized void flush() throws IOException {
    // NOTE(review): flags presumably (triggerMerge, flushDocStores,
    // flushDeletes) per IndexWriter.flush — confirm against this branch.
    ramWriter.flush(false, true, true);
    SegmentInfos ramInfos = null;
    synchronized (ramWriter) {
      ramInfos = ramWriter.getSegmentInfosCopy();
      // adding ram infos to mergingSegments
      // keeps them from being merged in ram writer
      for (SegmentInfo ramInfo : ramInfos) {
        ramWriter.mergingSegments.add(ramInfo);
      }
    }
    if (ramInfos.size() > 0) {
      // NOTE(review): BOTH merges are constructed with ramWriter as the
      // owning writer, including the one registered with primaryWriter —
      // confirm primaryMerge shouldn't be owned by primaryWriter instead.
      MergePolicy.OneMerge primaryMerge = new MergePolicy.OneMerge(ramWriter,
          ramInfos, primaryWriter.getUseCompoundFile());
      MergePolicy.OneMerge ramMerge = new MergePolicy.OneMerge(ramWriter,
          ramInfos, primaryWriter.getUseCompoundFile());
      ramMerge.registerDone = true;

      // Temporarily graft the RAM segments into the primary writer's
      // SegmentInfos so the merge machinery can see them, and mark the
      // merge as running so concurrent merge scheduling leaves it alone.
      synchronized (primaryWriter) {
        for (SegmentInfo info : primaryMerge.segments) {
          primaryWriter.segmentInfos.add(info);
        }
        if (primaryWriter.registerMerge(primaryMerge)) {
          primaryWriter.pendingMerges.remove(primaryMerge);
          primaryWriter.runningMerges.add(primaryMerge);
        }
      }
      // Perform the actual merge outside the primaryWriter monitor.
      primaryWriter.merge(primaryMerge);
      // remove the merged infos from the ram writer
      // Lock order: primaryWriter before ramWriter (matches the asserts
      // below that touch both writers' state).
      synchronized (primaryWriter) {
      synchronized (ramWriter) {
        for (SegmentInfo info : ramInfos) {
          assert !ramWriter.mergingSegments.contains(info);
          assert !primaryWriter.mergingSegments.contains(info);
          assert !primaryWriter.segmentInfos.contains(info);
          ramWriter.segmentInfos.remove(info);
          // NOTE(review): mergeFinish(ramMerge) is invoked once per segment
          // inside this loop; it looks like it only needs to run once after
          // the loop — confirm it is idempotent.
          ramWriter.mergeFinish(ramMerge);
        }
        // Let the deleter reconcile files against the now-pruned infos.
        ramWriter.deleter.checkpoint(ramWriter.segmentInfos, false);
      }
      }
    }
  }
  
  /** Combined reader over both writers, using the default terms-index divisor. */
  public IndexReader getReader() throws IOException {
    return getReader(IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
  }
  
  /**
   * Opens near-real-time readers on both writers and returns a MultiReader
   * over their segment readers, de-duplicating segments that appear in both
   * (e.g. mid-flush).
   *
   * NOTE(review): the top-level readers returned by each writer's
   * getReader(...) are discarded without close(); the MultiReader below
   * wraps only the sub-readers — check for a ref-count leak.
   */
  public IndexReader getReader(int termInfosIndexDivisor) throws IOException {
    IndexReader[] ramReaders = ramWriter.getReader(termInfosIndexDivisor, false).getSequentialSubReaders();
    IndexReader[] primaryReaders = primaryWriter.getReader(termInfosIndexDivisor, false)
        .getSequentialSubReaders();
    HashSet<IndexReader> readers = new HashSet<IndexReader>(ramReaders.length
        + primaryReaders.length);
    int i = 0;  // NOTE(review): unused local
    Set<SegmentInfo> infos = new HashSet<SegmentInfo>();
    // RAM readers first, skipping any segment already seen.
    for (int x = 0; x < ramReaders.length; x++) {
      SegmentInfo info = ((SegmentReader) ramReaders[x]).getSegmentInfo();
      if (!infos.contains(info)) {
        infos.add(info);
        readers.add(ramReaders[x]);
      }
    }
    for (int x = 0; x < primaryReaders.length; x++) {
      SegmentInfo info = ((SegmentReader) primaryReaders[x]).getSegmentInfo();
      infos.add(info);
      readers.add(primaryReaders[x]);
    }
    // make sure there are no duplicate segment infos
    // between the writers
    assert infos.size() == ramReaders.length + primaryReaders.length;
    MultiReader reader = new MultiReader((IndexReader[]) readers
        .toArray(new IndexReader[0]));
    return reader;
  }

  /** Current RAM footprint: buffered docs plus RAM-directory segment files. */
  public long getRAMSize() {
    return ramWriter.ramSizeInBytes() + ramDirectory.sizeInBytes();
  }

  /** Adds a document using the primary writer's analyzer. */
  public void addDocument(Document doc) throws IOException {
    addDocument(doc, primaryWriter.getAnalyzer());
  }

  /** Adds to the RAM writer only; flushes to primary if over the RAM budget. */
  public void addDocument(Document doc, Analyzer analyzer) throws IOException {
    ramWriter.addDocument(doc, analyzer);
    ifFlush();
  }

  /** Deletes must hit BOTH writers, since matching docs may live in either. */
  public void deleteDocuments(Term... terms) throws CorruptIndexException,
      IOException {
    ramWriter.deleteDocuments(terms);
    primaryWriter.deleteDocuments(terms);
    ifFlush();
  }

  public void deleteDocuments(Query query) throws CorruptIndexException,
      IOException {
    ramWriter.deleteDocuments(query);
    primaryWriter.deleteDocuments(query);
    ifFlush();
  }

  public void deleteDocuments(Query... queries) throws CorruptIndexException,
      IOException {
    ramWriter.deleteDocuments(queries);
    primaryWriter.deleteDocuments(queries);
    ifFlush();
  }

  /**
   * Update = atomic-ish delete+add on the RAM writer, plus a delete on the
   * primary (the replacement document always lands in the RAM writer).
   */
  public void updateDocument(Term term, Document doc, Analyzer analyzer)
      throws CorruptIndexException, IOException {
    ramWriter.updateDocument(term, doc, analyzer);
    primaryWriter.deleteDocuments(term);
    ifFlush();
  }

  public void updateDocument(Term term, Document doc)
      throws CorruptIndexException, IOException {
    ramWriter.updateDocument(term, doc);
    primaryWriter.deleteDocuments(term);
    ifFlush();
  }

  /** Flushes to the primary writer when the RAM budget is exceeded. */
  private void ifFlush() throws IOException {
    if (getRAMSize() > maxRam) {
      // NOTE(review): the two locals below are unused (leftover from the
      // commented-out logging); safe to remove.
      long ramSizeBytes = ramWriter.ramSizeInBytes();
      long ramDirBytes = ramDirectory.sizeInBytes();
      // if (infoStream != null)
      // infoStream.println("ramSize:"+ramSizeBytes+" ramDir:"+ramDirBytes);
      flush();
    }
  }
}
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 833130)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -944,8 +944,8 @@
 
       // Make sure we never attempt to apply deletes to
       // segment in external dir
-      assert infos.info(i).dir == directory;
-
+      //assert infos.info(i).dir == directory;
+      if (infos.info(i).dir == directory) {
       SegmentReader reader = writer.readerPool.get(infos.info(i), false);
       try {
         any |= applyDeletes(reader, docStart);
@@ -953,6 +953,7 @@
       } finally {
         writer.readerPool.release(reader);
       }
+      }
     }
 
     deletesFlushed.clear();
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 833130)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -17,31 +17,33 @@
  * limitations under the License.
  */
 
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DocumentsWriter.IndexingChain;
+import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.Query;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.store.BufferedIndexInput;
+import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.Constants;
 
-import java.io.IOException;
-import java.io.Closeable;
-import java.io.PrintStream;
-import java.util.List;
-import java.util.Collection;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.Iterator;
-import java.util.Map;
-
 /**
   An <code>IndexWriter</code> creates and maintains an index.
 
@@ -253,10 +255,10 @@
   private SegmentInfos localRollbackSegmentInfos;      // segmentInfos we will fallback to if the commit fails
   private int localFlushedDocCount;               // saved docWriter.getFlushedDocCount during local transaction
 
-  private SegmentInfos segmentInfos = new SegmentInfos();       // the segments
+  SegmentInfos segmentInfos = new SegmentInfos();       // the segments
 
   private DocumentsWriter docWriter;
-  private IndexFileDeleter deleter;
+  IndexFileDeleter deleter;
 
   private Set<SegmentInfo> segmentsToOptimize = new HashSet<SegmentInfo>();           // used by optimize to note those needing optimization
 
@@ -266,15 +268,16 @@
 
   private boolean closed;
   private boolean closing;
+  String name;
 
   // Holds all SegmentInfo instances currently involved in
   // merges
-  private HashSet<SegmentInfo> mergingSegments = new HashSet<SegmentInfo>();
+  HashSet<SegmentInfo> mergingSegments = new HashSet<SegmentInfo>();
 
   private MergePolicy mergePolicy = new LogByteSizeMergePolicy(this);
   private MergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
-  private LinkedList<MergePolicy.OneMerge> pendingMerges = new LinkedList<MergePolicy.OneMerge>();
-  private Set<MergePolicy.OneMerge> runningMerges = new HashSet<MergePolicy.OneMerge>();
+  LinkedList<MergePolicy.OneMerge> pendingMerges = new LinkedList<MergePolicy.OneMerge>();
+  Set<MergePolicy.OneMerge> runningMerges = new HashSet<MergePolicy.OneMerge>();
   private List<MergePolicy.OneMerge> mergeExceptions = new ArrayList<MergePolicy.OneMerge>();
   private long mergeGen;
   private boolean stopMerges;
@@ -288,6 +291,9 @@
   private Thread writeThread;                     // non-null if any thread holds write lock
   final ReaderPool readerPool = new ReaderPool();
   private int upgradeCount;
+  //NRT nrt;
+  //static boolean NRT_ONE = true;
+  //boolean useNRT = NRT_ONE;
   
   // This is a "write once" variable (like the organic dye
   // on a DVD-R that may or may not be heated by a laser and
@@ -360,7 +366,11 @@
   public IndexReader getReader() throws IOException {
     return getReader(IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
   }
-
+  
+  public IndexReader getReader(int termInfosIndexDivisor) throws IOException {
+    return getReader(termInfosIndexDivisor, true);
+  }
+  
   /** Expert: like {@link #getReader}, except you can
    *  specify which termInfosIndexDivisor should be used for
    *  any newly opened readers.
@@ -374,7 +384,10 @@
    *  memory usage, at the expense of higher latency when
    *  loading a TermInfo.  The default value is 1.  Set this
    *  to -1 to skip loading the terms index entirely. */
-  public IndexReader getReader(int termInfosIndexDivisor) throws IOException {
+  public IndexReader getReader(int termInfosIndexDivisor, boolean isNRT) throws IOException {
+    //if (nrt != null && isNRT) {
+    //  return nrt.getReader(termInfosIndexDivisor);
+    //}
     if (infoStream != null) {
       message("flush at getReader");
     }
@@ -520,6 +533,9 @@
         // near real-time reader is kept open after the
         // IndexWriter instance is closed
         sr.decRef();
+        if (sr.getRefCount() > 0) {
+          System.out.println(name+" "+sr.getSegmentInfo()+" close ref count:"+sr.getRefCount());
+        }
       }
     }
     
@@ -851,10 +867,16 @@
    *  <code>false</code> or if there is any other low-level
    *  IO error
    */
-  public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl)
+  public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl, boolean useNRT)
        throws CorruptIndexException, LockObtainFailedException, IOException {
+    //this.useNRT = false;
     init(d, a, create, null, mfl.getLimit(), null, null);
   }
+  
+  public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl)
+    throws CorruptIndexException, LockObtainFailedException, IOException {
+    init(d, a, create, null, mfl.getLimit(), null, null);
+  }
 
   /**
    * Constructs an IndexWriter for the index in
@@ -1020,6 +1042,13 @@
     analyzer = a;
     setMessageID(defaultInfoStream);
     this.maxFieldLength = maxFieldLength;
+    
+    //if (useNRT) {
+    //  RAMDirectory ramDir = new RAMDirectory();
+    //  IndexWriter ramWriter = new IndexWriter(ramDir, a, true,
+    //      new IndexWriter.MaxFieldLength(maxFieldLength), false);
+      //nrt = new NRT(ramWriter, this, (long)(DEFAULT_RAM_BUFFER_SIZE_MB * 1024 *1024));
+    //}
 
     if (indexingChain == null)
       indexingChain = DocumentsWriter.DefaultIndexingChain;
@@ -1361,6 +1390,9 @@
     if (mb == DISABLE_AUTO_FLUSH && getMaxBufferedDocs() == DISABLE_AUTO_FLUSH)
       throw new IllegalArgumentException(
           "at least one of ramBufferSize and maxBufferedDocs must be enabled");
+    //if (nrt != null) {
+    //  nrt.maxRam = (long)(mb * 1024 * 1024);
+    //}
     docWriter.setRAMBufferSizeMB(mb);
     if (infoStream != null)
       message("setRAMBufferSizeMB " + mb);
@@ -1912,6 +1944,10 @@
    * @throws IOException if there is a low-level IO error
    */
   public void addDocument(Document doc, Analyzer analyzer) throws CorruptIndexException, IOException {
+    //if (nrt != null) {
+    //  nrt.addDocument(doc, analyzer);
+    //  return;
+    //}
     ensureOpen();
     boolean doFlush = false;
     boolean success = false;
@@ -1955,6 +1991,10 @@
    * @throws IOException if there is a low-level IO error
    */
   public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
+    //if (nrt != null) {
+    //  nrt.deleteDocuments(term);
+    //  return;
+    //}
     ensureOpen();
     try {
       boolean doFlush = docWriter.bufferDeleteTerm(term);
@@ -1979,6 +2019,10 @@
    * @throws IOException if there is a low-level IO error
    */
   public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
+    //if (nrt != null) {
+    //  nrt.deleteDocuments(terms);
+    //  return;
+    //}
     ensureOpen();
     try {
       boolean doFlush = docWriter.bufferDeleteTerms(terms);
@@ -2001,6 +2045,10 @@
    * @throws IOException if there is a low-level IO error
    */
   public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
+    //if (nrt != null) {
+    //  nrt.deleteDocuments(query);
+    //  return;
+    //}
     ensureOpen();
     boolean doFlush = docWriter.bufferDeleteQuery(query);
     if (doFlush)
@@ -2021,6 +2069,10 @@
    * @throws IOException if there is a low-level IO error
    */
   public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
+    //if (nrt != null) {
+    //  nrt.deleteDocuments(queries);
+    //  return;
+    //}
     ensureOpen();
     boolean doFlush = docWriter.bufferDeleteQueries(queries);
     if (doFlush)
@@ -2069,6 +2121,10 @@
    */
   public void updateDocument(Term term, Document doc, Analyzer analyzer)
       throws CorruptIndexException, IOException {
+    //if (nrt != null) {
+    //  nrt.updateDocument(term, doc, analyzer);
+    //  return;
+    //}
     ensureOpen();
     try {
       boolean doFlush = false;
@@ -2875,7 +2931,10 @@
     while(pendingMerges.size() > 0 || runningMerges.size() > 0) {
       doWait();
     }
-
+    if (mergingSegments.size() > 0) {
+      System.out.println("mergingSegments more than 0: "+mergingSegments);
+    }
+    
     // sanity check
     assert 0 == mergingSegments.size();
   }
@@ -3060,7 +3119,28 @@
   private boolean hasExternalSegments() {
     return segmentInfos.hasExternalSegments(directory);
   }
-
+  /* Disabled experimental mergeIn() — plain block comment so it is not picked up as Javadoc:
+  final void mergeIn(MergePolicy.OneMerge merge) throws CorruptIndexException, IOException {
+    synchronized (this) {
+      for (SegmentInfo info : merge.segments) {
+        segmentInfos.add(info);
+      }
+      if (registerMerge(merge)) {
+        pendingMerges.remove(merge);
+        runningMerges.add(merge);
+      }
+    }
+    merge(merge);
+  }
+  */
+  synchronized SegmentInfos getSegmentInfosCopy() {
+    SegmentInfos copy = new SegmentInfos();
+    for (SegmentInfo info : segmentInfos) {
+      copy.add(info);
+    }
+    return copy;
+  }
+  
   /* If any of our segments are using a directory != ours
    * then we have to either copy them over one by one, merge
    * them (if merge policy has chosen to) or wait until
@@ -3089,7 +3169,7 @@
           info = segmentInfos.info(i);
           if (info.dir != directory) {
             done = false;
-            final MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(segmentInfos.range(i, 1+i), mergePolicy instanceof LogMergePolicy && getUseCompoundFile());
+            final MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(this, segmentInfos.range(i, 1+i), mergePolicy instanceof LogMergePolicy && getUseCompoundFile());
 
             // Returns true if no running merge conflicts
             // with this one (and, records this merge as
@@ -3697,7 +3777,7 @@
    *  delete generation for merge.info).  If no deletes were
    *  flushed, no new deletes file is saved. */
   synchronized private void commitMergedDeletes(MergePolicy.OneMerge merge, SegmentReader mergeReader) throws IOException {
-
+    synchronized (merge.writer) { 
     assert testPoint("startCommitMergeDeletes");
 
     final SegmentInfos sourceSegments = merge.segments;
@@ -3760,11 +3840,12 @@
     assert mergeReader.numDeletedDocs() == delCount;
 
     mergeReader.hasChanges = delCount >= 0;
+    }
   }
 
   /* FIXME if we want to support non-contiguous segment merges */
   synchronized private boolean commitMerge(MergePolicy.OneMerge merge, SegmentMerger merger, int mergedDocCount, SegmentReader mergedReader) throws IOException {
-
+    synchronized (merge.writer) {
     assert testPoint("startCommitMerge");
 
     if (hitOOM) {
@@ -3827,11 +3908,12 @@
     // If the merged segments had pending changes, clear
     // them so that they don't bother writing them to
     // disk, updating SegmentInfo, etc.:
-    readerPool.clear(merge.segments);
+    merge.writer.readerPool.clear(merge.segments);
 
     if (merge.optimize)
       segmentsToOptimize.add(merge.info);
     return true;
+    }
   }
   
   private synchronized void decrefMergeSegments(MergePolicy.OneMerge merge) throws IOException {
@@ -4211,7 +4293,7 @@
 
         // Hold onto the "live" reader; we will use this to
         // commit merged deletes
-        SegmentReader reader = merge.readers[i] = readerPool.get(info, merge.mergeDocStores,
+        SegmentReader reader = merge.readers[i] = merge.writer.readerPool.get(info, merge.mergeDocStores,
                                                                  MERGE_READ_BUFFER_SIZE,
                                                                  -1);
 
@@ -4292,7 +4374,7 @@
           for (int i=0;i<numSegments;i++) {
             if (merge.readers[i] != null) {
               try {
-                readerPool.release(merge.readers[i], true);
+                merge.writer.readerPool.release(merge.readers[i], true);
               } catch (Throwable t) {
               }
             }
@@ -4309,7 +4391,7 @@
         } else {
           for (int i=0;i<numSegments;i++) {
             if (merge.readers[i] != null) {
-              readerPool.release(merge.readers[i], true);
+              merge.writer.readerPool.release(merge.readers[i], true);
             }
 
             if (merge.readersClone[i] != null) {
@@ -4516,7 +4598,7 @@
       return true;
     }
   }
-
+  
   private synchronized void doWait() {
     // NOTE: the callers of this method should in theory
     // be able to do simply wait(), but, as a defense
