Index: CHANGES.txt
===================================================================
--- CHANGES.txt	(revision 771171)
+++ CHANGES.txt	(working copy)
@@ -291,10 +291,16 @@
     index files in a RAMDirectory. (Jason Rutherglen via Mike
     McCandless)
 
-23. LUCENE-1494: Added FieldMaskingSpanQuery which can be used to
+24. LUCENE-1494: Added FieldMaskingSpanQuery which can be used to
     cross-correlate Spans from different fields.
     (Paul Cowan and Chris Hostetter)
     
+25. LUCENE-1313: Near realtime search enhancement.  IndexWriter
+    flushes new segments to a RAMDirectory instead of the real
+    directory, which can be faster than flushing and merging many
+    small segments using only an FSDirectory.
+    (Jason Rutherglen via Mike McCandless)
+    
 Optimizations
 
  1. LUCENE-1427: Fixed QueryWrapperFilter to not waste time computing
Index: src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
===================================================================
--- src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java	(revision 771171)
+++ src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java	(working copy)
@@ -41,8 +41,6 @@
   // Max number of threads allowed to be merging at once
   private int maxThreadCount = 3;
 
-  protected Directory dir;
-
   private boolean closed;
   protected IndexWriter writer;
   protected int mergeThreadCount;
@@ -145,7 +143,15 @@
         count++;
     return count;
   }
-
+  
+  private String printDirs(Directory[] dirs, IndexWriter writer) {
+    List list = new ArrayList();
+    for (int x=0; x < dirs.length; x++) {
+      list.add(writer.printDir(dirs[x]));
+    }
+    return list.toString();
+  }
+  
   public void merge(IndexWriter writer)
     throws CorruptIndexException, IOException {
 
@@ -156,8 +162,6 @@
 
     initMergeThreadPriority();
 
-    dir = writer.getDirectory();
-
     // First, quickly run through the newly proposed merges
     // and add any orthogonal merges (ie a merge not
     // involving segments already pending to be merged) to
@@ -177,14 +181,14 @@
       // TODO: we could be careful about which merges to do in
       // the BG (eg maybe the "biggest" ones) vs FG, which
       // merges to do first (the easiest ones?), etc.
-
-      MergePolicy.OneMerge merge = writer.getNextMerge();
+      Directory[] destDirs = getDestinationDirs(writer);
+      MergePolicy.OneMerge merge = writer.getNextMerge(destDirs);
       if (merge == null) {
         if (verbose())
           message("  no more merges pending; now return");
         return;
       }
-
+      assert merge.directory != null;
       // We do this w/ the primary thread to keep
       // deterministic assignment of segment names
       writer.mergeInit(merge);
@@ -207,7 +211,7 @@
           }
 
           if (verbose())
-            message("  consider merge " + merge.segString(dir));
+            message("  consider merge " + merge.segString(merge.directory));
       
           assert mergeThreadCount() < maxThreadCount;
 
@@ -292,11 +296,12 @@
 
           // Subsequent times through the loop we do any new
           // merge that writer says is necessary:
-          merge = writer.getNextMerge();
+          Directory[] destDirs = getDestinationDirs(writer);
+          merge = writer.getNextMerge(destDirs);
           if (merge != null) {
             writer.mergeInit(merge);
             if (verbose())
-              message("  merge thread: do another merge " + merge.segString(dir));
+              message("  merge thread: do another merge " + merge.segString(merge.directory));
           } else
             break;
         }
@@ -312,7 +317,7 @@
             // suppressExceptions is normally only set during
             // testing.
             anyExceptions = true;
-            handleMergeException(exc);
+            handleMergeException(exc, merge.directory);
           }
         }
       } finally {
@@ -328,13 +333,13 @@
       MergePolicy.OneMerge merge = getRunningMerge();
       if (merge == null)
         merge = startMerge;
-      return "merge thread: " + merge.segString(dir);
+      return "merge thread: " + merge.segString(merge.directory);
     }
   }
 
   /** Called when an exception is hit in a background merge
    *  thread */
-  protected void handleMergeException(Throwable exc) {
+  protected void handleMergeException(Throwable exc, Directory dir) {
     try {
       // When an exception is hit during merge, IndexWriter
       // removes any partial files and then allows another
Index: src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java	(revision 771171)
+++ src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java	(working copy)
@@ -23,6 +23,7 @@
 import java.io.IOException;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.store.Directory;
 
 /**
  * Gathers all Fieldables for a document under the same
@@ -146,7 +147,7 @@
   }
 
   public DocumentsWriter.DocWriter processDocument() throws IOException {
-
+    Directory dir = this.docState.docWriter.writer.getFlushDirectory();
     consumer.startDocument();
     final Document doc = docState.doc;
 
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 771171)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -25,6 +25,7 @@
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.util.ArrayUtil;
 
@@ -110,7 +111,6 @@
 final class DocumentsWriter {
 
   IndexWriter writer;
-  Directory directory;
 
   String segment;                         // Current segment we are working on
   private String docStoreSegment;         // Current doc-store segment we are writing
@@ -259,8 +259,7 @@
 
   private boolean closed;
 
-  DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain) throws IOException {
-    this.directory = directory;
+  DocumentsWriter(IndexWriter writer, IndexingChain indexingChain) throws IOException {
     this.writer = writer;
     this.similarity = writer.getSimilarity();
     flushedDocCount = writer.maxDoc();
@@ -536,6 +535,7 @@
 
   synchronized private void initFlushState(boolean onlyDocStore) {
     initSegmentName(onlyDocStore);
+    Directory directory = writer.getFlushDirectory();
     flushState = new SegmentWriteState(this, directory, segment, docStoreSegment, numDocsInRAM, numDocsInStore, writer.getTermIndexInterval());
   }
 
@@ -550,6 +550,7 @@
     assert waitQueue.numWaiting == 0;
     assert waitQueue.waitingBytes == 0;
 
+    Directory directory = writer.getFlushDirectory();
     initFlushState(false);
 
     docStoreOffset = numDocsInStore;
@@ -574,7 +575,7 @@
       consumer.flush(threads, flushState);
 
       if (infoStream != null) {
-        final long newSegmentSize = segmentSize(flushState.segmentName);
+        final long newSegmentSize = segmentSize(flushState.segmentName, directory);
         String message = "  oldRAMSize=" + numBytesUsed +
           " newFlushedSize=" + newSegmentSize +
           " docs/MB=" + nf.format(numDocsInRAM/(newSegmentSize/1024./1024.)) +
@@ -600,7 +601,7 @@
   }
 
   /** Build compound file for the segment we just flushed */
-  void createCompoundFile(String segment) throws IOException {
+  void createCompoundFile(String segment, Directory directory) throws IOException {
     
     CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
     Iterator it = flushState.flushedFiles.iterator();
@@ -763,7 +764,7 @@
       // This call is not synchronized and does all the
       // work
       final DocWriter perDoc = state.consumer.processDocument();
-        
+
       // This call is synchronized but fast
       finishDocument(state, perDoc);
       success = true;
@@ -914,11 +915,13 @@
 
     int docStart = 0;
     boolean any = false;
+    Directory flushDir = writer.getFlushDirectory();
+    Directory directory = writer.getDirectory();
     for (int i = 0; i < infosEnd; i++) {
 
       // Make sure we never attempt to apply deletes to
       // segment in external dir
-      assert infos.info(i).dir == directory;
+      assert infos.info(i).dir == directory || infos.info(i).dir == flushDir;
 
       SegmentReader reader = writer.readerPool.get(infos.info(i), false);
       try {
@@ -1111,10 +1114,9 @@
 
   // TODO FI: this is not flexible -- we can't hardwire
   // extensions in here:
-  private long segmentSize(String segmentName) throws IOException {
+  private long segmentSize(String segmentName, Directory directory) throws IOException {
     // Used only when infoStream != null
     assert infoStream != null;
-    
     long size = directory.fileLength(segmentName + ".tii") +
       directory.fileLength(segmentName + ".tis") +
       directory.fileLength(segmentName + ".frq") +
@@ -1123,10 +1125,15 @@
     final String normFileName = segmentName + ".nrm";
     if (directory.fileExists(normFileName))
       size += directory.fileLength(normFileName);
-
+    
     return size;
   }
-
+  
+  public long fileLength(String name) throws IOException {
+    Directory dir = writer.getFlushDirectory();
+    return dir.fileLength(name);
+  }
+  
   // Coarse estimates used to measure RAM usage of buffered deletes
   final static int OBJECT_HEADER_BYTES = 8;
   final static int POINTER_NUM_BYTE = 4;
@@ -1276,7 +1283,6 @@
 
     // We flush when we've used our target usage
     final long flushTrigger = (long) ramBufferSize;
-
     if (numBytesAlloc > freeTrigger) {
 
       if (infoStream != null)
@@ -1346,10 +1352,16 @@
       // over-allocating and then freeing, with every
       // flush.
       synchronized(this) {
-
-        if (numBytesUsed > flushTrigger) {
+        long totalUsed = numBytesUsed;
+        if (writer.poolReaders) {
+          // if NRT is on, we need to take into account
+          // flushing to the RAM dir will consume the amount
+          // we're currently using in the ram buffer
+          totalUsed = (numBytesUsed * 2) + writer.getRamDirSize();
+        }
+        if (totalUsed > flushTrigger) {
           if (infoStream != null)
-            message("  RAM: now flush @ usedMB=" + nf.format(numBytesUsed/1024./1024.) +
+            message("  RAM: now flush @ usedMB=" + nf.format(totalUsed/1024./1024.) +
                     " allocMB=" + nf.format(numBytesAlloc/1024./1024.) +
                     " triggerMB=" + nf.format(flushTrigger/1024./1024.));
 
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 771171)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -28,6 +28,8 @@
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.BufferedIndexInput;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.store.FileSwitchDirectory;
 import org.apache.lucene.util.Constants;
 
 import java.io.File;
@@ -286,6 +288,18 @@
    * set (see {@link #setInfoStream}).
    */
   public final static int MAX_TERM_LENGTH = DocumentsWriter.MAX_TERM_LENGTH;
+  
+  /**
+   * File extensions routed to the primary (on-disk) directory by the
+   * FileSwitchDirectory used when NRT is on: stored fields and term
+   * vectors are written straight to disk instead of the RAMDirectory
+   */
+  public final static Set SWITCH_FILE_EXTS = new HashSet();
+  static {
+    SWITCH_FILE_EXTS.add("fdx");
+    SWITCH_FILE_EXTS.add("fdt");
+    SWITCH_FILE_EXTS.add("tvx");
+    SWITCH_FILE_EXTS.add("tvf");
+    SWITCH_FILE_EXTS.add("tvd");
+  }
 
   /**
    * Default for {@link #getMaxSyncPauseSeconds}.  On
@@ -353,7 +367,9 @@
   private HashSet mergingSegments = new HashSet();
 
   private MergePolicy mergePolicy = new LogByteSizeMergePolicy();
-  private MergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
+  private MergePolicy ramMergePolicy = mergePolicy;
+  MergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
+  MergeScheduler ramMergeScheduler = mergeScheduler;
   private LinkedList pendingMerges = new LinkedList();
   private Set runningMerges = new HashSet();
   private List mergeExceptions = new ArrayList();
@@ -370,6 +386,7 @@
   private Thread writeThread;                     // non-null if any thread holds write lock
   final ReaderPool readerPool = new ReaderPool();
   private int upgradeCount;
+  private FileSwitchDirectory switchDirectory;
   
   // This is a "write once" variable (like the organic dye
   // on a DVD-R that may or may not be heated by a laser and
@@ -380,7 +397,7 @@
   // reuse SegmentReader instances internally for applying
   // deletes, doing merges, and reopening near real-time
   // readers.
-  private volatile boolean poolReaders;
+  volatile boolean poolReaders;
   
   /**
    * Expert: returns a readonly reader containing all
@@ -730,6 +747,17 @@
     else
       throw new IllegalArgumentException("this method can only be called when the merge policy is the default LogMergePolicy");
   }
+  
+  /**
+   * Casts current mergePolicy to LogMergePolicy, and throws
+   * an exception if the mergePolicy is not a LogMergePolicy.
+   */
+  private LogMergePolicy getRamLogMergePolicy() {
+    if (ramMergePolicy instanceof LogMergePolicy)
+      return (LogMergePolicy) ramMergePolicy;
+    else
+      throw new IllegalArgumentException("this method can only be called when the merge policy is the default LogMergePolicy");
+  }
 
   /** <p>Get the current setting of whether newly flushed
    *  segments will use the compound file format.  Note that
@@ -1261,7 +1289,7 @@
     throws CorruptIndexException, LockObtainFailedException, IOException {
     init(d, a, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
   }
-  
+
   /**
    * Expert: constructs an IndexWriter with a custom {@link
    * IndexDeletionPolicy}, for the index in <code>d</code>.
@@ -1294,7 +1322,7 @@
        throws CorruptIndexException, LockObtainFailedException, IOException {
     init(d, a, create, false, deletionPolicy, false, mfl.getLimit(), null, null);
   }
-  
+
   /**
    * Expert: constructs an IndexWriter with a custom {@link
    * IndexDeletionPolicy} and {@link IndexingChain}, 
@@ -1331,7 +1359,7 @@
        throws CorruptIndexException, LockObtainFailedException, IOException {
     init(d, a, create, false, deletionPolicy, false, mfl.getLimit(), indexingChain, commit);
   }
-  
+
   /**
    * Expert: constructs an IndexWriter with a custom {@link
    * IndexDeletionPolicy}, for the index in <code>d</code>.
@@ -1415,13 +1443,16 @@
       init(d, a, true, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit);
     }
   }
-
+  
   private void init(Directory d, Analyzer a, final boolean create, boolean closeDir, 
                     IndexDeletionPolicy deletionPolicy, boolean autoCommit, int maxFieldLength,
                     IndexingChain indexingChain, IndexCommit commit)
     throws CorruptIndexException, LockObtainFailedException, IOException {
     this.closeDir = closeDir;
     directory = d;
+    this.switchDirectory = new FileSwitchDirectory(SWITCH_FILE_EXTS, directory, new RAMDirectory(), false);
+    // note: ramMergePolicy initially aliases mergePolicy, so this also
+    // disables compound files for the primary policy until a separate
+    // policy is installed via setRAMMergePolicy
+    getRamLogMergePolicy().setUseCompoundDocStore(false);
+    getRamLogMergePolicy().setUseCompoundFile(false);
     analyzer = a;
     setMessageID(defaultInfoStream);
     this.maxFieldLength = maxFieldLength;
@@ -1493,13 +1524,13 @@
       this.autoCommit = autoCommit;
       setRollbackSegmentInfos(segmentInfos);
 
-      docWriter = new DocumentsWriter(directory, this, indexingChain);
+      docWriter = new DocumentsWriter(this, indexingChain);
       docWriter.setInfoStream(infoStream);
       docWriter.setMaxFieldLength(maxFieldLength);
 
       // Default deleter (for backwards compatibility) is
       // KeepOnlyLastCommitDeleter:
-      deleter = new IndexFileDeleter(directory,
+      deleter = new IndexFileDeleter(switchDirectory, 
                                      deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy,
                                      segmentInfos, infoStream, docWriter);
 
@@ -1523,16 +1554,66 @@
       throw e;
     }
   }
-
+  
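+  /**
+   * Returns the directory newly flushed segments are written to: the
+   * FileSwitchDirectory (primary dir + RAM dir) when NRT reader
+   * pooling is on, otherwise the primary directory.
+   */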
+  public Directory getFlushDirectory() {
+    if (poolReaders)
+      return switchDirectory;
+    return directory;
+  }
+  
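+  /** Total bytes currently used by files in the NRT RAM directory. */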
+  long getRamDirSize() {
+    try {
+      return size(switchDirectory.getSecondaryDir());
+    } catch (IOException ex) {
+      throw new RuntimeException("could not read NRT ram dir size", ex);
+    }
+  }
+  
+  static long size(Directory dir) throws IOException {
+    String[] files = dir.listAll();
+    long c = 0;
+    for (int x=0; x < files.length; x++) {
+      c += dir.fileLength(files[x]);
+    }
+    return c;
+  }
+  
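+  /** Returns the RAMDirectory used for NRT flushes (the switch directory's secondary dir). */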
+  public Directory getRAMDirectory() {
+    return switchDirectory.getSecondaryDir();
+  }
+  
   private synchronized void setRollbackSegmentInfos(SegmentInfos infos) {
     rollbackSegmentInfos = (SegmentInfos) infos.clone();
-    assert !rollbackSegmentInfos.hasExternalSegments(directory);
+    assert !rollbackSegmentInfos.hasExternalSegments(getFlushDirectory());
     rollbackSegments = new HashMap();
     final int size = rollbackSegmentInfos.size();
     for(int i=0;i<size;i++)
       rollbackSegments.put(rollbackSegmentInfos.info(i), new Integer(i));
   }
+  
+  /**
+   * Expert: set the ram directory merge policy used by this writer.
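+   * A hypothetical usage sketch (the policy takes effect once NRT is
+   * on, e.g. after a getReader() call):
+   * <pre>
+   * LogByteSizeMergePolicy ramPolicy = new LogByteSizeMergePolicy();
+   * ramPolicy.setMergeFactor(4); // merge small ram segments more aggressively
+   * writer.setRAMMergePolicy(ramPolicy);
+   * </pre>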
+   */
+  public void setRAMMergePolicy(MergePolicy mp) {
+    ensureOpen();
+    if (mp == null)
+      throw new NullPointerException("MergePolicy must be non-null");
 
+    if (ramMergePolicy != mp && ramMergePolicy != mergePolicy)
+      ramMergePolicy.close(); // don't close a policy still used as the primary merge policy
+    ramMergePolicy = mp;
+    // by default turn off the compound file format
+    // for the ram merge policy
+    if (ramMergePolicy instanceof LogMergePolicy) {
+      LogMergePolicy lmp = (LogMergePolicy)ramMergePolicy;
+      lmp.setUseCompoundDocStore(false);
+      lmp.setUseCompoundFile(false);
+    }
+    pushMaxBufferedDocs();
+    if (infoStream != null)
+      message("setMergePolicy " + mp);
+  }
+  
   /**
    * Expert: set the merge policy used by this writer.
    */
@@ -1565,7 +1646,10 @@
     ensureOpen();
     if (mergeScheduler == null)
       throw new NullPointerException("MergeScheduler must be non-null");
-
+    // if the ram scheduler currently shares the primary scheduler,
+    // keep them in sync after the change
+    boolean changeRamMS = (ramMergeScheduler == this.mergeScheduler);
     if (this.mergeScheduler != mergeScheduler) {
       finishMerges(true);
       this.mergeScheduler.close();
@@ -1573,10 +1657,29 @@
     this.mergeScheduler = mergeScheduler;
     if (infoStream != null)
       message("setMergeScheduler " + mergeScheduler);
+    if (changeRamMS)
+      ramMergeScheduler = mergeScheduler;
   }
+  
+  /**
+   * Expert: set the merge scheduler used for merges of segments in
+   * the NRT ram directory.
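+   * A hypothetical sketch, e.g. to run ram-dir merges on the calling
+   * thread:
+   * <pre>
+   * writer.setRAMMergeScheduler(new SerialMergeScheduler());
+   * </pre>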
+   */
+  synchronized public void setRAMMergeScheduler(MergeScheduler ramMergeScheduler) throws CorruptIndexException, IOException {
+    ensureOpen();
+    if (ramMergeScheduler == null)
+      throw new NullPointerException("MergeScheduler must be non-null");
 
+    if (this.ramMergeScheduler != ramMergeScheduler) {
+      finishMerges(true);
+      this.ramMergeScheduler.close();
+    }
+    this.ramMergeScheduler = ramMergeScheduler;
+    this.ramMergeScheduler.setType("ram");
+    if (infoStream != null)
+      message("setRAMMergeScheduler " + ramMergeScheduler);
+  }
+
   /**
-   * Expert: returns the current MergePolicy in use by this
+   * Expert: returns the current MergeScheduler in use by this
    * writer.
    * @see #setMergePolicy
    */
@@ -1585,6 +1688,16 @@
     return mergeScheduler;
   }
 
+  /**
+   * Expert: returns the current RAM MergeScheduler in use by this
+   * writer.
+   * @see #setRAMMergeScheduler
+   */
+  public MergeScheduler getRAMMergeScheduler() {
+    ensureOpen();
+    return ramMergeScheduler;
+  }
+  
   /** <p>Determines the largest segment (measured by
    * document count) that may be merged with other segments.
    * Small values (e.g., less than 10,000) are best for
@@ -2064,7 +2177,7 @@
       if (waitForMerges)
         // Give merge scheduler last chance to run, in case
         // any pending merges are waiting:
-        mergeScheduler.merge(this);
+        scheduleMerge();
 
       mergePolicy.close();
 
@@ -2088,9 +2201,12 @@
         deleter.close();
       }
       
-      if (closeDir)
+      if (closeDir) {
         directory.close();
-
+        Directory ramDir = switchDirectory.getSecondaryDir(); 
+        ramDir.close();
+      }
+      
       if (writeLock != null) {
         writeLock.release();                          // release write lock
         writeLock = null;
@@ -2149,7 +2265,8 @@
       final String compoundFileName = docStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION;
 
       try {
-        CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName);
+        Directory dir = getFlushDirectory();
+        CompoundFileWriter cfsWriter = new CompoundFileWriter(dir, compoundFileName);
         final Iterator it = docWriter.closedFiles().iterator();
         while(it.hasNext())
           cfsWriter.addFile((String) it.next());
@@ -2531,7 +2648,23 @@
       handleOOM(oom, "updateDocument");
     }
   }
-
+  
+  // for test purpose
+  final synchronized SegmentInfos getSegmentInfos() {
+    return segmentInfos;
+  }
+  
+  final synchronized SegmentInfos getSegmentInfos(Directory dir) {
+    SegmentInfos dirInfos = new SegmentInfos();
+    for (int x=0; x < segmentInfos.size(); x++) {
+      SegmentInfo info = segmentInfos.info(x);
+      if (info.dir == dir) {
+        dirInfos.add(info);
+      }
+    }
+    return dirInfos;
+  }
+  
   // for test purpose
   final synchronized int getSegmentCount(){
     return segmentInfos.size();
@@ -2733,7 +2866,7 @@
             for(int i=0;i<size;i++) {
               final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) mergeExceptions.get(0);
               if (merge.optimize) {
-                IOException err = new IOException("background merge hit exception: " + merge.segString(directory));
+                IOException err = new IOException("background merge hit exception: " + merge.segString(merge.directory));
                 final Throwable t = merge.getException();
                 if (t != null)
                   err.initCause(t);
@@ -2805,7 +2938,7 @@
       }
     }
 
-    mergeScheduler.merge(this);
+    scheduleMerge();
 
     if (spec != null && doWait) {
       final int numMerges = spec.merges.size();
@@ -2827,7 +2960,7 @@
               running = true;
             Throwable t = merge.getException();
             if (t != null) {
-              IOException ioe = new IOException("background merge hit exception: " + merge.segString(directory));
+              IOException ioe = new IOException("background merge hit exception: " + merge.segString(merge.directory));
               ioe.initCause(t);
               throw ioe;
             }
@@ -2892,13 +3025,91 @@
   private final void maybeMerge(boolean optimize) throws CorruptIndexException, IOException {
     maybeMerge(1, optimize);
   }
-
+  
+  long getRamBufferSizeLong() {
+    return (long) (getRAMBufferSizeMB()*1024*1024);
+  }
+  
   private final void maybeMerge(int maxNumSegmentsOptimize, boolean optimize) throws CorruptIndexException, IOException {
-    updatePendingMerges(maxNumSegmentsOptimize, optimize);
-    mergeScheduler.merge(this);
+    updatePendingMerges(maxNumSegmentsOptimize, optimize, false);
+    scheduleMerge();
   }
-
-  private synchronized void updatePendingMerges(int maxNumSegmentsOptimize, boolean optimize)
+  
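+  /**
+   * If the ram segments plus the in-memory buffer exceed the
+   * configured RAM buffer size, schedule an optimizing merge of the
+   * ram segments over to the primary directory.
+   */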
+  synchronized void maybeMergeRamToDir() throws IOException {
+    SegmentInfos ramInfos = getRamDirSegmentInfos();
+    long ramSegSize = size(ramInfos);
+    long totalRamUsed = ramSegSize + docWriter.getRAMUsed();
+    if (totalRamUsed > getRamBufferSizeLong()) {
+      // optimize the ram segments over to the primary directory
+      if (infoStream != null)
+        message("maybeMergeRamToDir: merging ram segments to the primary dir");
+      updatePendingMerges(1, true, mergePolicy, ramInfos, directory);
+    }
+  }
+  
+  /**
+   * Merge the ram segments to disk and optimize
+   * them to minimize future disk merges.
+   * @param doWait if true, run the merge synchronously on the calling thread
+   * @throws IOException
+   */
+  synchronized void mergeRamSegmentsToDisk(boolean doWait) throws IOException {
+    SegmentInfos ramInfos = getRamDirSegmentInfos();
+    if (ramInfos.size() > 0) {
+      segmentsToOptimize = new HashSet();
+      final int numSegments = ramInfos.size();
+      for(int i=0;i<numSegments;i++)
+        segmentsToOptimize.add(ramInfos.info(i));
+      updatePendingMerges(1, true, mergePolicy, ramInfos, directory);
+      if (doWait) {
+        MergePolicy.OneMerge merge = getNextMerge(new Directory[] {directory});
+        assert merge != null;
+        merge(merge);
+      }
+    }
+  }
+  
+  void updatePendingMerges(int maxNumSegmentsOptimize, boolean optimize, boolean forceToDir) throws IOException {
+    synchronized (this) {
+      if (poolReaders) {
+        //System.out.println("updatePendingMerges maxNumSegmentsOptimize:"+maxNumSegmentsOptimize+" optimize: "+optimize+" forceToDir:"+forceToDir);
+        SegmentInfos ramInfos = getRamDirSegmentInfos();
+        long ramSegSize = size(ramInfos);
+        long totalRamUsed = ramSegSize + docWriter.getRAMUsed();
+        //System.out.println("totalRamUsed:"+totalRamUsed+" rambufsize:"+getRamBufferSizeLong());
+        if (forceToDir || (totalRamUsed > getRamBufferSizeLong())) {
+          mergeRamSegmentsToDisk(false);
+          updatePendingMerges(maxNumSegmentsOptimize, optimize, mergePolicy, segmentInfos, directory);
+        } else {
+          updatePendingMerges(maxNumSegmentsOptimize, optimize, mergePolicy, segmentInfos, directory);
+          updatePendingMerges(maxNumSegmentsOptimize, optimize, ramMergePolicy, ramInfos, switchDirectory);
+        }
+      } else {
+        updatePendingMerges(maxNumSegmentsOptimize, optimize, mergePolicy, segmentInfos, directory);
+      }
+    }
+    if (forceToDir) {
+      scheduleMerge();
+    }
+  }
+  
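+  /**
+   * Runs the primary scheduler, and the ram scheduler too when a
+   * separate one is installed, so pending merges destined for either
+   * directory get picked up.
+   */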
+  void scheduleMerge() throws IOException {
+    if (mergeScheduler != ramMergeScheduler) {
+      mergeScheduler.merge(this);
+      ramMergeScheduler.merge(this);
+    } else {
+      mergeScheduler.merge(this);
+    }
+  }
+  
+  synchronized long size(SegmentInfos infos) throws IOException {
+    long size = 0;
+    for (int x=0; x < infos.size(); x++) {
+      size += infos.info(x).sizeInBytes();
+    }
+    return size;
+  }
+  
+  private synchronized void updatePendingMerges(int maxNumSegmentsOptimize, boolean optimize, MergePolicy mergePolicy, SegmentInfos segmentInfos, Directory directory)
     throws CorruptIndexException, IOException {
     assert !optimize || maxNumSegmentsOptimize > 0;
 
@@ -2913,7 +3124,6 @@
     final MergePolicy.MergeSpecification spec;
     if (optimize) {
       spec = mergePolicy.findMergesForOptimize(segmentInfos, this, maxNumSegmentsOptimize, segmentsToOptimize);
-
       if (spec != null) {
         final int numMerges = spec.merges.size();
         for(int i=0;i<numMerges;i++) {
@@ -2928,8 +3138,11 @@
 
     if (spec != null) {
       final int numMerges = spec.merges.size();
-      for(int i=0;i<numMerges;i++)
-        registerMerge((MergePolicy.OneMerge) spec.merges.get(i));
+      for(int i=0;i<numMerges;i++) {
+        MergePolicy.OneMerge merge = (MergePolicy.OneMerge) spec.merges.get(i);
+        merge.directory = directory; // set the destination directory
+        registerMerge(merge);
+      }
     }
   }
 
@@ -2946,6 +3159,30 @@
       return merge;
     }
   }
+  
+  /** Expert: the {@link MergeScheduler} calls this method
+   *  to retrieve the next merge requested by the
+   *  MergePolicy */
+  synchronized MergePolicy.OneMerge getNextMerge(Directory[] dirs) {
+    if (pendingMerges.size() == 0)
+      return null;
+    else {
+      Iterator it = pendingMerges.iterator();
+      while(it.hasNext()) {
+        MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next();
+        assert merge.directory != null;
+        for (int x=0; x < dirs.length; x++) {
+          if (merge.directory == dirs[x]) {
+            // Advance the merge from pending to running
+            it.remove();
+            runningMerges.add(merge);
+            return merge;
+          }
+        }
+      }
+      return null;
+    }
+  }
 
   /** Like getNextMerge() except only returns a merge if it's
    *  external. */
@@ -3249,7 +3486,7 @@
       while(it.hasNext()) {
         final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next();
         if (infoStream != null)
-          message("now abort pending merge " + merge.segString(directory));
+          message("now abort pending merge " + merge.segString(merge.directory));
         merge.abort();
         mergeFinish(merge);
       }
@@ -3259,7 +3496,7 @@
       while(it.hasNext()) {
         final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next();
         if (infoStream != null)
-          message("now abort running merge " + merge.segString(directory));
+          message("now abort running merge " + merge.segString(merge.directory));
         merge.abort();
       }
 
@@ -3552,7 +3789,7 @@
   }
 
   private boolean hasExternalSegments() {
-    return segmentInfos.hasExternalSegments(directory);
+    return segmentInfos.hasExternalSegments(getFlushDirectory());
   }
 
   /* If any of our segments are using a directory != ours
@@ -3584,7 +3821,7 @@
           if (info.dir != directory) {
             done = false;
             final MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(segmentInfos.range(i, 1+i), info.getUseCompoundFile());
-
+            newMerge.directory = directory; 
             // Returns true if no running merge conflicts
             // with this one (and, records this merge as
             // pending), ie, this segment is not currently
@@ -3627,9 +3864,9 @@
     if (any)
       // Sometimes, on copying an external segment over,
       // more merges may become necessary:
-      mergeScheduler.merge(this);
+      scheduleMerge();
   }
-
+  
   /** Merges the provided indexes into this index.
    * <p>After this completes, the index is optimized. </p>
    * <p>The provided IndexReaders are not closed.</p>
@@ -3873,7 +4110,6 @@
   }
 
   private final void prepareCommit(String commitUserData, boolean internal) throws CorruptIndexException, IOException {
-
     if (hitOOM) {
       throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot commit");
     }
@@ -3886,9 +4122,10 @@
 
     if (infoStream != null)
       message("prepareCommit: flush");
-
+    
     flush(true, true, true);
-
+    // merge ram segments to disk synchronously
+    mergeRamSegmentsToDisk(true);
     startCommit(0, commitUserData);
   }
 
@@ -3983,9 +4220,24 @@
     if (infoStream != null)
       message("commit: done");
   }
-
+  
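+  /** Returns the subset of infos whose segments reside in dir. */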
+  public static SegmentInfos getInfosByDir(SegmentInfos infos, Directory dir) throws IOException {
+    SegmentInfos dirInfos = new SegmentInfos();
+    for (int x=0; x < infos.size(); x++) {
+      SegmentInfo info = infos.info(x);
+      if (info.dir == dir) {
+        dirInfos.add(info);
+      }
+    }
+    return dirInfos;
+  }
+  
+  synchronized SegmentInfos getRamDirSegmentInfos() throws IOException {
+    return getInfosByDir(segmentInfos, switchDirectory);
+  }
+  
   /**
-   * Flush all in-memory buffered udpates (adds and deletes)
+   * Flush all in-memory buffered updates (adds and deletes)
    * to the Directory.
    * @param triggerMerge if true, we may merge segments (if
    *  deletes or docs were flushed) if necessary
@@ -3995,10 +4247,11 @@
    *  be flushed
    */
   protected final void flush(boolean triggerMerge, boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
-    // We can be called during close, when closing==true, so we must pass false to ensureOpen:
     ensureOpen(false);
     if (doFlush(flushDocStores, flushDeletes) && triggerMerge)
       maybeMerge();
+    // check if we need to now merge to disk if we're in NRT RAM dir mode
+    maybeMergeRamToDir();
   }
 
   // TODO: this method should not have to be entirely
@@ -4129,9 +4382,10 @@
         // Create new SegmentInfo, but do not add to our
         // segmentInfos until deletes are flushed
         // successfully.
+        Directory segDir = getFlushDirectory();
         newSegment = new SegmentInfo(segment,
                                      flushedDocCount,
-                                     directory, false, true,
+                                     segDir, false, true,
                                      docStoreOffset, docStoreSegment,
                                      docStoreIsCompoundFile,    
                                      docWriter.hasProx());
@@ -4156,7 +4410,7 @@
         // Now build compound file
         boolean success = false;
         try {
-          docWriter.createCompoundFile(segment);
+          docWriter.createCompoundFile(segment, getFlushDirectory());
           success = true;
         } finally {
           if (!success) {
@@ -4200,20 +4454,23 @@
 
     int first = segmentInfos.indexOf(merge.segments.info(0));
     if (first == -1)
-      throw new MergePolicy.MergeException("could not find segment " + merge.segments.info(0).name + " in current index " + segString(), directory);
+      throw new MergePolicy.MergeException("could not find segment " + merge.segments.info(0).name + " in current index " + segString(), merge.directory);
 
     final int numSegments = segmentInfos.size();
     
     final int numSegmentsToMerge = merge.segments.size();
     for(int i=0;i<numSegmentsToMerge;i++) {
       final SegmentInfo info = merge.segments.info(i);
-
-      if (first + i >= numSegments || !segmentInfos.info(first+i).equals(info)) {
+
+      // keep the short-circuit: segmentInfos.info(first+i) is out of
+      // bounds when first + i >= numSegments
+      if (first + i >= numSegments || !segmentInfos.info(first+i).equals(info)) {
         if (segmentInfos.indexOf(info) == -1)
-          throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.name + ") that is not in the current index " + segString(), directory);
+          throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.name + ") that is not in the current index " + segString(), merge.directory);
         else
-          throw new MergePolicy.MergeException("MergePolicy selected non-contiguous segments to merge (" + merge.segString(directory) + " vs " + segString() + "), which IndexWriter (currently) cannot handle",
-                                               directory);
+          throw new MergePolicy.MergeException("n1: "+n1+" n2:"+n2+" MergePolicy selected non-contiguous segments to merge (" + merge.segString(merge.directory) + " vs " + segString() + "), which IndexWriter (currently) cannot handle",
+                                               merge.directory);
       }
     }
 
@@ -4236,7 +4493,7 @@
     final SegmentInfos sourceSegments = merge.segments;
 
     if (infoStream != null)
-      message("commitMergeDeletes " + merge.segString(directory));
+      message("commitMergeDeletes " + merge.segString(merge.directory));
 
     // Carefully merge deletes that occurred after we
     // started merging:
@@ -4305,7 +4562,7 @@
     }
 
     if (infoStream != null)
-      message("commitMerge: " + merge.segString(directory) + " index=" + segString());
+      message("commitMerge: " + merge.segString(merge.directory) + " index=" + segString());
 
     assert merge.registerDone;
 
@@ -4317,7 +4574,7 @@
     // abort this merge
     if (merge.isAborted()) {
       if (infoStream != null)
-        message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");
+        message("commitMerge: skipping merge " + merge.segString(merge.directory) + ": it was aborted");
 
       deleter.refresh(merge.info.name);
       return false;
@@ -4371,7 +4628,7 @@
   final private void handleMergeException(Throwable t, MergePolicy.OneMerge merge) throws IOException {
 
     if (infoStream != null) {
-      message("handleMergeException: merge=" + merge.segString(directory) + " exc=" + t);
+      message("handleMergeException: merge=" + merge.segString(merge.directory) + " exc=" + t);
     }
 
     // Set the exception on the merge, so if
@@ -4416,7 +4673,7 @@
           mergeInit(merge);
 
           if (infoStream != null)
-            message("now merge\n  merge=" + merge.segString(directory) + "\n  merge=" + merge + "\n  index=" + segString());
+            message("now merge\n  merge=" + merge.segString(merge.directory) + "\n  merge=" + merge + "\n  index=" + segString());
 
           mergeMiddle(merge);
           success = true;
@@ -4437,8 +4694,9 @@
           // This merge (and, generally, any change to the
           // segments) may now enable new merges, so we call
           // merge policy & update pending merges.
-          if (success && !merge.isAborted() && !closed && !closing)
-            updatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize);
+          if (success && !merge.isAborted() && !closed && !closing) {
+            updatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize, false);
+          }
         }
       }
     } catch (OutOfMemoryError oom) {
@@ -4459,9 +4717,9 @@
 
     if (stopMerges) {
       merge.abort();
-      throw new MergePolicy.MergeAbortedException("merge is aborted: " + merge.segString(directory));
+      throw new MergePolicy.MergeAbortedException("merge is aborted: " + merge.segString(merge.directory));
     }
-
+    
     final int count = merge.segments.size();
     boolean isExternal = false;
     for(int i=0;i<count;i++) {
@@ -4470,7 +4728,7 @@
         return false;
       if (segmentInfos.indexOf(info) == -1)
         return false;
-      if (info.dir != directory)
+      if (info.dir != directory && info.dir != switchDirectory)
         isExternal = true;
     }
 
@@ -4635,7 +4893,7 @@
     // ConcurrentMergePolicy we keep deterministic segment
     // names.
     merge.info = new SegmentInfo(newSegmentName(), 0,
-                                 directory, false, true,
+                                 (merge.directory != null ? merge.directory : directory), false, true,
                                  docStoreOffset,
                                  docStoreSegment,
                                  docStoreIsCompoundFile,
@@ -4647,7 +4905,22 @@
     // CFS:
     mergingSegments.add(merge.info);
   }
-
+  
+  static List getSegmentNames(SegmentInfos infos) {
+    List list = new ArrayList();
+    for (int x=0; x < infos.size(); x++) {
+      list.add(infos.info(x).name);
+    }
+    return list;
+  }
+  
+  String printDir(Directory dir) {
+    if (dir == switchDirectory) return "switchdir";
+    if (dir == directory) return "realdir";
+    if (switchDirectory != null && dir == switchDirectory.getSecondaryDir()) return "ramdir";
+    return "unknown";
+  }
+  
   /** This is called after merging a segment and before
    *  building its CFS.  Return true if the files should be
    *  sync'd.  If you return false, then the source segment
@@ -4717,7 +4990,7 @@
   final private int mergeMiddle(MergePolicy.OneMerge merge) 
     throws CorruptIndexException, IOException {
     
-    merge.checkAborted(directory);
+    merge.checkAborted(merge.directory);
 
     final String mergedName = merge.info.name;
     
@@ -4729,7 +5002,7 @@
     final int numSegments = sourceSegments.size();
 
     if (infoStream != null)
-      message("merging " + merge.segString(directory));
+      message("merging " + merge.segString(merge.directory));
 
     merger = new SegmentMerger(this, mergedName, merge);
 
Index: src/java/org/apache/lucene/index/LogMergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/LogMergePolicy.java	(revision 771171)
+++ src/java/org/apache/lucene/index/LogMergePolicy.java	(working copy)
@@ -19,7 +19,9 @@
 
 import java.io.IOException;
 import java.util.Set;
+import java.util.List;
 
+import org.apache.lucene.index.MergePolicy.OneMerge;
 import org.apache.lucene.store.Directory;
 
 /** <p>This class implements a {@link MergePolicy} that tries
@@ -301,7 +303,7 @@
 
     return spec;
   }
-
+  
   /** Checks if any merges are now necessary and returns a
    *  {@link MergePolicy.MergeSpecification} if so.  A merge
    *  is necessary when there are more than {@link
@@ -309,8 +311,7 @@
    *  multiple levels have too many segments, this method
    *  will return multiple merges, allowing the {@link
    *  MergeScheduler} to use concurrency. */
-  public MergeSpecification findMerges(SegmentInfos infos, IndexWriter writer) throws IOException {
-
+  public MergeSpecification findMerges(SegmentInfos infos, IndexWriter writer) throws IOException {  
     final int numSegments = infos.size();
     this.writer = writer;
     if (verbose())
@@ -321,8 +322,6 @@
     float[] levels = new float[numSegments];
     final float norm = (float) Math.log(mergeFactor);
 
-    final Directory directory = writer.getDirectory();
-
     for(int i=0;i<numSegments;i++) {
       final SegmentInfo info = infos.info(i);
       long size = size(info);
Index: src/java/org/apache/lucene/index/MergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/MergePolicy.java	(revision 771171)
+++ src/java/org/apache/lucene/index/MergePolicy.java	(working copy)
@@ -85,6 +85,7 @@
     final boolean useCompoundFile;
     boolean aborted;
     Throwable error;
+    Directory directory;
 
     public OneMerge(SegmentInfos segments, boolean useCompoundFile) {
       if (0 == segments.size())
Index: src/java/org/apache/lucene/index/MergeScheduler.java
===================================================================
--- src/java/org/apache/lucene/index/MergeScheduler.java	(revision 771171)
+++ src/java/org/apache/lucene/index/MergeScheduler.java	(working copy)
@@ -19,6 +19,8 @@
 
 import java.io.IOException;
 
+import org.apache.lucene.store.Directory;
+
 /** <p>Expert: {@link IndexWriter} uses an instance
  *  implementing this interface to execute the merges
  *  selected by a {@link MergePolicy}.  The default
@@ -35,7 +37,36 @@
 */
 
 public abstract class MergeScheduler {
-
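+  // "primary" for the scheduler merging to the real directory,
+  // "ram" for a separately installed NRT ram-dir scheduler; see
+  // getDestinationDirs.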
+  private String type = "primary";
+  
+  void setType(String type) {
+    this.type = type;
+  }
+  
+  String getType() {
+    return type;
+  }
+  
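+  /**
+   * Returns the destination directories this scheduler should pull
+   * merges for, based on whether NRT is on and whether a separate
+   * ram merge scheduler is installed.
+   */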
+  protected Directory[] getDestinationDirs(IndexWriter writer) throws IOException {
+    Directory dir = writer.getDirectory();
+    Directory flushDir = writer.getFlushDirectory();
+    if (dir == flushDir) {
+      // NRT off: all merges go to the real directory
+      return new Directory[] {dir};
+    }
+    if (writer.ramMergeScheduler == writer.mergeScheduler) {
+      // one scheduler handles both destinations
+      return new Directory[] {dir, flushDir};
+    }
+    // separate schedulers: each pulls only merges for its own destination
+    if (type.equals("ram")) {
+      return new Directory[] {flushDir};
+    }
+    return new Directory[] {dir};
+  }
+  
   /** Run the merges provided by {@link IndexWriter#getNextMerge()}. */
   abstract void merge(IndexWriter writer)
     throws CorruptIndexException, IOException;
Index: src/java/org/apache/lucene/index/MultiSegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiSegmentReader.java	(revision 771171)
+++ src/java/org/apache/lucene/index/MultiSegmentReader.java	(working copy)
@@ -83,14 +83,16 @@
     // no need to process segments in reverse order
     final int numSegments = infos.size();
     SegmentReader[] readers = new SegmentReader[numSegments];
-    final Directory dir = writer.getDirectory();
+    final Directory flushDir = writer.getFlushDirectory();
     int upto = 0;
 
     for (int i=0;i<numSegments;i++) {
       boolean success = false;
       try {
         final SegmentInfo info = infos.info(upto);
-        if (info.dir == dir) {
+        // accept the regular and ram dirs (not the external dir 
+        // from addIndexesNoOptimize)
+        if (flushDir.equals(info.dir)) {
           readers[upto++] = writer.readerPool.getReadOnlyClone(info, true);
         }
         success = true;
Index: src/java/org/apache/lucene/index/SegmentInfos.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfos.java	(revision 771171)
+++ src/java/org/apache/lucene/index/SegmentInfos.java	(working copy)
@@ -812,12 +812,27 @@
     final int size = size();
     for(int i=0;i<size;i++) {
       final SegmentInfo info = info(i);
-      if (info.dir == dir) {
+      if (dir.equals(info.dir)) {
         files.addAll(info(i).files());
       }
     }
     return files;
   }
+  
+  public Collection files(Directory dir, Directory ramDir, boolean includeSegmentsFile) throws IOException {
+    HashSet files = new HashSet();
+    if (includeSegmentsFile) {
+      files.add(getCurrentSegmentFileName());
+    }
+    final int size = size();
+    for(int i=0;i<size;i++) {
+      final SegmentInfo info = info(i);
+      if (info.dir == dir || info.dir == ramDir) {
+        files.addAll(info(i).files());
+      }
+    }
+    return files;
+  }
 
   public final void finishCommit(Directory dir) throws IOException {
     if (pendingOutput == null)
@@ -920,9 +935,11 @@
   // Used only for testing
   boolean hasExternalSegments(Directory dir) {
     final int numSegments = size();
-    for(int i=0;i<numSegments;i++)
-      if (info(i).dir != dir)
+    for(int i=0;i<numSegments;i++) {
+      if (!dir.equals(info(i).dir)) {
         return true;
+      }
+    }
     return false;
   }
 }
Index: src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentMerger.java	(revision 771171)
+++ src/java/org/apache/lucene/index/SegmentMerger.java	(working copy)
@@ -78,7 +78,8 @@
   }
 
   SegmentMerger(IndexWriter writer, String name, MergePolicy.OneMerge merge) {
-    directory = writer.getDirectory();
+    // prefer the merge's destination directory; fall back to the writer's
+    directory = (merge != null && merge.directory != null) ? merge.directory : writer.getDirectory();
     segment = name;
     if (merge != null)
       checkAbort = new CheckAbort(merge, directory);
@@ -690,7 +691,7 @@
     public void work(double units) throws MergePolicy.MergeAbortedException {
       workCount += units;
       if (workCount >= 10000.0) {
-        merge.checkAborted(dir);
+        merge.checkAborted(merge.directory);
         workCount = 0;
       }
     }
Index: src/java/org/apache/lucene/index/SerialMergeScheduler.java
===================================================================
--- src/java/org/apache/lucene/index/SerialMergeScheduler.java	(revision 771171)
+++ src/java/org/apache/lucene/index/SerialMergeScheduler.java	(working copy)
@@ -19,23 +19,26 @@
 
 import java.io.IOException;
 
+import org.apache.lucene.store.Directory;
+
 /** A {@link MergeScheduler} that simply does each merge
  *  sequentially, using the current thread. */
-public class SerialMergeScheduler extends MergeScheduler {
-
+public class SerialMergeScheduler extends MergeScheduler {  
   /** Just do the merges in sequence. We do this
    * "synchronized" so that even if the application is using
    * multiple threads, only one merge may run at a time. */
   synchronized public void merge(IndexWriter writer)
     throws CorruptIndexException, IOException {
-
+    
+    Directory[] destDirs = getDestinationDirs(writer);
+    
     while(true) {
-      MergePolicy.OneMerge merge = writer.getNextMerge();
+      MergePolicy.OneMerge merge = writer.getNextMerge(destDirs);
       if (merge == null)
         break;
       writer.merge(merge);
     }
   }
-
+  
   public void close() {}
 }
Index: src/java/org/apache/lucene/index/StoredFieldsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/StoredFieldsWriter.java	(revision 771171)
+++ src/java/org/apache/lucene/index/StoredFieldsWriter.java	(working copy)
@@ -20,6 +20,7 @@
 import java.util.Map;
 import java.io.IOException;
 import org.apache.lucene.store.RAMOutputStream;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.ArrayUtil;
 
 /** This is a DocFieldConsumer that writes stored fields. */
@@ -62,7 +63,8 @@
       final String docStoreSegment = docWriter.getDocStoreSegment();
       if (docStoreSegment != null) {
         assert docStoreSegment != null;
-        fieldsWriter = new FieldsWriter(docWriter.directory,
+        Directory directory = docWriter.writer.getFlushDirectory();
+        fieldsWriter = new FieldsWriter(directory,
                                         docStoreSegment,
                                         fieldInfos);
         docWriter.addOpenFile(docStoreSegment + "." + IndexFileNames.FIELDS_EXTENSION);
Index: src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsTermsWriter.java	(revision 771171)
+++ src/java/org/apache/lucene/index/TermVectorsTermsWriter.java	(working copy)
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.RAMOutputStream;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.ArrayUtil;
 
 import java.io.IOException;
@@ -152,9 +153,10 @@
       // vector output files, we must abort this segment
       // because those files will be in an unknown
       // state:
-      tvx = docWriter.directory.createOutput(docStoreSegment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
-      tvd = docWriter.directory.createOutput(docStoreSegment +  "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
-      tvf = docWriter.directory.createOutput(docStoreSegment +  "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
+      Directory dir = docWriter.writer.getFlushDirectory();
+      tvx = dir.createOutput(docStoreSegment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
+      tvd = dir.createOutput(docStoreSegment +  "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
+      tvf = dir.createOutput(docStoreSegment +  "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
       
       tvx.writeInt(TermVectorsReader.FORMAT_CURRENT);
       tvd.writeInt(TermVectorsReader.FORMAT_CURRENT);
Index: src/java/org/apache/lucene/store/FileSwitchDirectory.java
===================================================================
--- src/java/org/apache/lucene/store/FileSwitchDirectory.java	(revision 771171)
+++ src/java/org/apache/lucene/store/FileSwitchDirectory.java	(working copy)
@@ -33,7 +33,7 @@
 public class FileSwitchDirectory extends Directory {
   private final Directory secondaryDir;
   private final Directory primaryDir;
-  private final Set primaryExtensions;
+  private Set primaryExtensions;
   private boolean doClose;
 
   public FileSwitchDirectory(Set primaryExtensions, Directory primaryDir, Directory secondaryDir, boolean doClose) {
@@ -44,6 +44,47 @@
     this.lockFactory = primaryDir.getLockFactory();
   }
   
+  /**
+   * This constructor takes no extension set; the directory a file is
+   * associated with is determined by which wrapped directory the file
+   * already exists in (see getDirectory).
+   * @param primaryDir
+   * @param secondaryDir
+   * @param doClose
+   */
+  public FileSwitchDirectory(Directory primaryDir, Directory secondaryDir, boolean doClose) {
+    this.primaryDir = primaryDir;
+    this.secondaryDir = secondaryDir;
+    this.doClose = doClose;
+    this.lockFactory = primaryDir.getLockFactory();
+  }
+  
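+  /**
+   * A FileSwitchDirectory compares equal to either of the directories
+   * it wraps, so checks such as SegmentInfos.hasExternalSegments treat
+   * both primary-dir and ram-dir segments as internal.  Note this
+   * makes equals asymmetric.
+   */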
+  public boolean equals(Object other) {
+    if (other instanceof FileSwitchDirectory) {
+      return super.equals(other);
+    }
+    if (other instanceof Directory) {
+      return primaryDir.equals(other) || secondaryDir.equals(other);
+    }
+    return false;
+  }
+  
+  public synchronized Set getPrimaryExtensions() {
+    return primaryExtensions;
+  }
+  
+  public synchronized void setPrimaryExtensions(Set primaryExtensions) {
+    this.primaryExtensions = primaryExtensions;
+  }
+  
+  public Directory getPrimaryDir() {
+    return primaryDir;
+  }
+  
+  public Directory getSecondaryDir() {
+    return secondaryDir;
+  }
+  
   public void close() throws IOException {
     if (doClose) {
       try {
@@ -80,7 +121,10 @@
     return name.substring(i+1, name.length());
   }
 
-  private Directory getDirectory(String name) {
+  private Directory getDirectory(String name) throws IOException {
+    // if the file exists somewhere run fileExists
+    if (primaryDir.fileExists(name)) return primaryDir;
+    if (secondaryDir.fileExists(name)) return secondaryDir;
     String ext = getExtension(name);
     if (primaryExtensions.contains(ext)) {
       return primaryDir;
Index: src/test/org/apache/lucene/index/TestCrash.java
===================================================================
--- src/test/org/apache/lucene/index/TestCrash.java	(revision 771171)
+++ src/test/org/apache/lucene/index/TestCrash.java	(working copy)
@@ -49,7 +49,7 @@
     return writer;
   }
 
-  private void crash(final IndexWriter writer) throws IOException {
+  public static void crash(final IndexWriter writer) throws IOException {
     final MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
     ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) writer.getMergeScheduler();
     dir.crash();
Index: src/test/org/apache/lucene/index/TestIndexWriterRamDir.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterRamDir.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestIndexWriterRamDir.java	(revision 0)
@@ -0,0 +1,417 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.FileSwitchDirectory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Tests IndexWriter's NRT ram directory mode: when reader pooling is
+ * on, new segments are flushed to a ram directory first and later
+ * merged to disk.
+ */
+// TODO: test setting ram mergescheduler
+public class TestIndexWriterRamDir extends LuceneTestCase {
+  public void testHasExternalSegments() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.getReader(); // turn on NRT
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    FileSwitchDirectory fsd = (FileSwitchDirectory)writer.getFlushDirectory();
+    
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
+    writer.commit(); // commit to primary dir
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
+    writer.flush(); // flush to ram
+    
+    // we now have both ram and primary dir segments; make sure
+    // hasExternalSegments works for a FileSwitchDirectory
+    boolean hes = writer.getSegmentInfos().hasExternalSegments(writer.getFlushDirectory());
+    assertFalse(hes);
+    writer.close();
+    dir.close();
+  }
+  
+  /**
+   * Test ram NRT with an FSDirectory
+   * @throws IOException
+   */
+  public void testFSDirectory() throws IOException {
+    String tempDir = System.getProperty("java.io.tmpdir");
+    if (tempDir == null)
+        throw new IOException("java.io.tmpdir undefined, cannot run test");
+    File indexDir = new File(tempDir, "lucenetestindexwriterramdir");
+    File[] files = indexDir.listFiles();
+    if (files != null) { // listFiles returns null if the dir does not exist yet
+      for (int x=0; x < files.length; x++) {
+        if (!files[x].isDirectory()) files[x].delete();
+      }
+    }
+    Directory dir = FSDirectory.getDirectory(indexDir);
+    IndexWriter.unlock(dir);
+    openWriterAddDocs(dir, false);
+    dir.close();
+    dir = FSDirectory.getDirectory(indexDir);
+    IndexWriter.unlock(dir);
+    openWriterAddDocs(dir, true);
+    dir.close();
+  }
+  
+  public static void openWriterAddDocs(Directory dir, boolean commitClose) throws IOException {
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.getReader(); // turn on NRT
+    Set infoFiles = new HashSet();
+    SegmentInfos infos = writer.getSegmentInfos();
+    for (int x=0; x < infos.size(); x++) {
+      SegmentInfo info = infos.info(x);
+      List files = info.files();
+      for (int i=0; i < files.size(); i++) {
+        String f = (String)files.get(i);
+        infoFiles.add(f);
+      }
+    }
+    System.out.println("infos files:"+infoFiles);
+    System.out.println("dirfiles:"+printFiles(dir));
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
+    writer.flush();
+    if (commitClose) {
+      writer.commit();
+      writer.close();
+    }
+    // when commitClose is false the writer is intentionally left open,
+    // simulating an unclean shutdown
+  }
+  
+  /**
+   * Make sure that after IndexWriter (via FileSwitchDirectory) has
+   * written .fdt files to the primary directory, and the writer is
+   * then abandoned before close (so the ram segments were never
+   * flushed to the primary dir), a new IndexWriter (via
+   * IndexFileDeleter) cleans up the now-unused .fdt files.
+   * @throws IOException
+   */
+  public void testIFDDeletingAfterCrash() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.getReader(); // turn on NRT
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    FileSwitchDirectory fsd = (FileSwitchDirectory)writer.getFlushDirectory();
+    
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
+    writer.flush();
+    Set segmentNames = getSegmentNames(fsd.getSecondaryDir());
+    List fdtFileNames = new ArrayList();
+    Iterator segNameIterator = segmentNames.iterator();
+    while (segNameIterator.hasNext()) {
+      String name = (String)segNameIterator.next();
+      String fileName = name+".fdt";
+      if (dir.fileExists(fileName)) {
+        fdtFileNames.add(fileName);
+      }
+    }
+    
+    System.out.println("fdtFileNames:"+fdtFileNames);
+    
+    IndexWriter.unlock(dir);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    System.out.println("dirfiles:"+printFiles(dir));
+    // now verify the fdt files are no longer in the dir
+    for (int x=0; x < fdtFileNames.size(); x++) {
+      String name = (String)fdtFileNames.get(x);
+      assertFalse(dir.fileExists(name));
+    }
+    writer.close();
+    dir.close();
+  }
+  
+  /**
+   * Scan the directory for unique .tii files, extract the
+   * segment names, and return them as a set
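+   * (e.g. with non-compound segments _0 and _1 this returns {_0, _1})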
+   * @throws IOException
+   */
+  public static Set getSegmentNames(Directory dir) throws IOException {
+    Set set = new HashSet();
+    String[] files = dir.listAll();
+    for (int x=0; x < files.length; x++) {
+      if (files[x].endsWith(".tii")) {
+        String str = files[x].substring(0, files[x].indexOf('.'));
+        set.add(str);
+      }
+    }
+    return set;
+  }
+  
+  public void testFSDFilesInPrimaryDir() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.getReader(); // turn on NRT
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    assertTrue(writer.getDirectory() != writer.getFlushDirectory());
+    FileSwitchDirectory fsd = (FileSwitchDirectory)writer.getFlushDirectory();
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    LogDocMergePolicy ramMP = new LogDocMergePolicy();
+    writer.setRAMMergePolicy(ramMP);
+    
+    // add 1000 documents
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }    
+    // after flushing, doc store files (.fdx, .fdt, .tvx) should be in the primary dir
+    writer.flush();
+    writer.commit();
+    cms.sync();
+    
+    writer.updatePendingMerges(1, false, true);
+    cms.sync(); // wait for the merges to complete
+    
+    
+    assertTrue(hasDirSegmentInfos(dir, writer.getSegmentInfos()));
+    IndexReader reader = writer.getReader();
+    assertEquals(1000, reader.maxDoc());
+    writer.close();
+    reader.close();
+    dir.close();
+  }
+  
+  public static String printFiles(Directory dir) throws IOException {
+    String[] files = dir.listAll();
+    List filesList = new ArrayList();
+    for (int x=0; x < files.length; x++) {
+      filesList.add(files[x]);
+    }
+    return filesList.toString();
+  }
+  
+  /**
+   * Test that IndexWriter performs in-ram merges once the
+   * ram buffer size is exceeded
+   * @throws IOException
+   */
+  public void testMergeRamExceeded() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.getReader(); // turn on NRT
+    writer.setRAMBufferSizeMB(0.06); // 60 kilobytes should be exceeded quickly
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    assertTrue(writer.getDirectory() != writer.getFlushDirectory());
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    LogDocMergePolicy ramMP = new LogDocMergePolicy();
+    ramMP.setMergeFactor(2);
+    writer.setRAMMergePolicy(ramMP);
+    
+    // add 1000 documents; the small ram buffer forces frequent flushes and merges
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
+    cms.sync(); // wait for any in-ram merges to complete
+    
+    assertTrue(hasDirSegmentInfos(dir, writer.getSegmentInfos()));
+    IndexReader reader = writer.getReader();
+    assertEquals(1000, reader.maxDoc());
+    writer.close();
+    reader.close();
+    dir.close();
+  }
+  
+  /**
+   * Test IndexWriter performing merges entirely in ram
+   * @throws IOException
+   */
+  public void testMergeInRam() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.getReader(); // turn on NRT
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    LogDocMergePolicy ramMP = new LogDocMergePolicy();
+    ramMP.setMergeFactor(2);
+    writer.setRAMMergePolicy(ramMP);
+    
+    // create roughly 10 ram segments by flushing every 500 documents
+    for (int x=0; x < 5000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+      if (x % 500 == 0) {
+        writer.flush();
+      }
+    }
+    assertTrue(writer.getRamDirSegmentInfos().size() > 0);
+    IndexReader reader = writer.getReader();
+    assertEquals(5000, reader.maxDoc());
+    //System.out.println("infos.size:"+infos.size()+" raminfos.size:"+writer.getRamDirSegmentInfos().size());
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+  
+  /**
+   * Test IndexWriter merging ram segments to disk
+   * @throws IOException
+   */
+  public void testMergeToDisk() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.getReader();
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    LogDocMergePolicy ramMP = new LogDocMergePolicy();
+    ramMP.setUseCompoundDocStore(false);
+    ramMP.setUseCompoundFile(false);
+    writer.setRAMMergePolicy(ramMP);
+    
+    // create 10 segments by flushing every 500 documents
+    for (int x=0; x < 5000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+      if (x % 500 == 0) {
+        writer.flush();
+      }
+    }
+    SegmentInfos infos = writer.getSegmentInfos();
+    //System.out.println("infos.size:"+infos.size()+" raminfos.size:"+writer.getRamDirSegmentInfos().size());
+    assertTrue(infos.size() == 10);
+    assertTrue(hasDirSegmentInfos(writer.getFlushDirectory(), infos));
+    writer.updatePendingMerges(1, false, true);
+    cms.sync(); // wait for the merges to complete
+    //System.out.println("ram infos.size:"+writer.getRamDirSegmentInfos().size()+" "+IndexWriter.getSegmentNames(writer.getRamDirSegmentInfos()));
+    // there shouldn't be any ram segments
+    //SegmentInfos ramInfos = IndexWriter.getInfosByDir(infos, ramDir);
+    SegmentInfos ramInfos = writer.getRamDirSegmentInfos();
+    // make sure the number of segments decreased
+    assertTrue(ramInfos.size() < 3);
+    
+    IndexReader reader = writer.getReader();
+    assertEquals(5000, reader.maxDoc());
+    
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+  
+  private static boolean hasDirSegmentInfos(Directory dir, SegmentInfos infos) {
+    int dirSegs = 0;
+    for (int x=0; x < infos.size(); x++) {
+      SegmentInfo info = infos.info(x);
+      if (info.dir == dir) {
+        dirSegs++;
+      }
+    }
+    return dirSegs > 0;
+  }
+  
+  /**
+   * Test that the ram dir is working and that the reader
+   * returned from IndexWriter includes the in-ram segments.
+   * @throws IOException
+   */
+  public void testRamDir() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.getReader(); // turn on NRT
+    for (int x=0; x < 100; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
+    writer.flush(false, false, true); // triggerMerge=false, flushDocStores=false, flushDeletes=true
+    SegmentInfos infos = writer.getSegmentInfos();
+    assertTrue(writer.getDirectory() != writer.getFlushDirectory());
+    boolean hasRamDirSegs = hasDirSegmentInfos(writer.getFlushDirectory(), infos);
+    //System.out.println("ramDirSegs:"+ramDirSegs);
+    assertTrue(hasRamDirSegs);
+    IndexReader ramReader = writer.getReader();
+    assertEquals(100, ramReader.maxDoc());
+    writer.close();
+    ramReader.close();
+    dir.close();
+  }
+}
