Index: CHANGES.txt
===================================================================
--- CHANGES.txt	(revision 776405)
+++ CHANGES.txt	(working copy)
@@ -312,6 +312,12 @@
     deletions into account when considering merges.  (Yasuhiro Matsuda
     via Mike McCandless)
     
+25. LUCENE-1313: Near realtime search enhancement.  IndexWriter
+    accepts a RAMDirectory that segments are flushed to (instead of 
+    to the real directory).  This can be faster than flushing and 
+    merging small segments using only an FSDirectory.  
+    (Jason Rutherglen via Mike McCandless)
+    
 Optimizations
 
  1. LUCENE-1427: Fixed QueryWrapperFilter to not waste time computing
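For context on the LUCENE-1313 CHANGES entry above: a rough usage sketch of the flush-to-RAM mode this patch adds, using the new IndexWriter constructor that takes a flushToRam flag together with the existing getReader() NRT API. The index path, analyzer, and field names are placeholders, and the behavior notes reflect this patch (getReader flushes to the internal RAMDirectory when space allows; commit merges RAM segments down to the primary directory), not released Lucene.

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class NrtRamFlushExample {
      public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.getDirectory("/tmp/nrt-index");
        // flushToRam=true (added by this patch) routes newly flushed segments
        // to the writer's internal RAMDirectory instead of the FSDirectory
        IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true,
            IndexWriter.MaxFieldLength.LIMITED, true);

        Document doc = new Document();
        doc.add(new Field("body", "near realtime search",
            Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);

        // getReader() flushes buffered docs (to RAM when there is room) and
        // returns a reader that sees them without a commit
        IndexReader reader = writer.getReader();
        System.out.println("numDocs=" + reader.numDocs());
        reader.close();

        writer.commit();   // per this patch, commit first merges RAM segments to the primary directory
        writer.close();
      }
    }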
Index: src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
===================================================================
--- src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java	(revision 776405)
+++ src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java	(working copy)
@@ -207,7 +207,7 @@
           }
 
           if (verbose())
-            message("  consider merge " + merge.segString(dir));
+            message("  consider merge " + merge.segString());
       
           assert mergeThreadCount() < maxThreadCount;
 
@@ -296,7 +296,7 @@
           if (merge != null) {
             writer.mergeInit(merge);
             if (verbose())
-              message("  merge thread: do another merge " + merge.segString(dir));
+              message("  merge thread: do another merge " + merge.segString());
           } else
             break;
         }
@@ -328,7 +328,7 @@
       MergePolicy.OneMerge merge = getRunningMerge();
       if (merge == null)
         merge = startMerge;
-      return "merge thread: " + merge.segString(dir);
+      return "merge thread: " + merge.segString();
     }
   }
 
Index: src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java	(revision 776405)
+++ src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java	(working copy)
@@ -23,6 +23,7 @@
 import java.io.IOException;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.store.Directory;
 
 /**
  * Gathers all Fieldables for a document under the same
@@ -146,7 +147,6 @@
   }
 
   public DocumentsWriter.DocWriter processDocument() throws IOException {
-
     consumer.startDocument();
     final Document doc = docState.doc;
 
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 776405)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -25,6 +25,7 @@
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.util.ArrayUtil;
 
@@ -110,7 +111,6 @@
 final class DocumentsWriter {
 
   IndexWriter writer;
-  Directory directory;
 
   String segment;                         // Current segment we are working on
   private String docStoreSegment;         // Current doc-store segment we are writing
@@ -130,14 +130,15 @@
                                           // pause (eg to flush)
   boolean flushPending;                   // True when a thread has decided to flush
   boolean bufferIsFull;                   // True when it's time to write segment
+  boolean ramDirIsFull;
   private boolean aborting;               // True if an abort is pending
+  Directory flushToDir;
 
   private DocFieldProcessor docFieldProcessor;
 
   PrintStream infoStream;
   int maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH;
   Similarity similarity;
-
   List newFiles;
 
   static class DocState {
@@ -235,6 +236,10 @@
   private long ramBufferSize = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
   private long waitQueuePauseBytes = (long) (ramBufferSize*0.1);
   private long waitQueueResumeBytes = (long) (ramBufferSize*0.05);
+  
+  private long ramDirSizeMax = 0;
+  private long ramDirSize = 0;
+  private long ramTotalMax = 0;
 
   // If we've allocated 5% over our RAM budget, we then
   // free down to 95%
@@ -256,11 +261,10 @@
   synchronized void setFlushedDocCount(int n) {
     flushedDocCount = n;
   }
-
+  
   private boolean closed;
 
-  DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain) throws IOException {
-    this.directory = directory;
+  DocumentsWriter(IndexWriter writer, IndexingChain indexingChain) throws IOException {
     this.writer = writer;
     this.similarity = writer.getSimilarity();
     flushedDocCount = writer.maxDoc();
@@ -270,7 +274,7 @@
       docFieldProcessor = (DocFieldProcessor) consumer;
     }
   }
-
+  
   /** Returns true if any of the fields in the current
    *  buffered docs have omitTermFreqAndPositions==false */
   boolean hasProx() {
@@ -285,7 +289,15 @@
     for(int i=0;i<threadStates.length;i++)
       threadStates[i].docState.infoStream = infoStream;
   }
-
+  
+  void pushRamDirSize() {
+    // cache the current size of the writer's RAMDirectory
+    this.ramDirSize = writer.getRamDirSize();
+  }
+  
+  long getRamDirSize() {
+    return ramDirSize;
+  }
+  
   synchronized void setMaxFieldLength(int maxFieldLength) {
     this.maxFieldLength = maxFieldLength;
     for(int i=0;i<threadStates.length;i++)
@@ -297,7 +309,15 @@
     for(int i=0;i<threadStates.length;i++)
       threadStates[i].docState.similarity = similarity;
   }
-
+  
+  synchronized void setRAMDirSizeMB(double mb) {
+    if (mb == IndexWriter.DISABLE_AUTO_FLUSH) {
+      ramDirSizeMax = IndexWriter.DISABLE_AUTO_FLUSH;
+    } else {
+      ramDirSizeMax = (long) (mb*1024*1024);
+    }
+  }
+  
   /** Set how much RAM we can use before flushing. */
   synchronized void setRAMBufferSizeMB(double mb) {
     if (mb == IndexWriter.DISABLE_AUTO_FLUSH) {
@@ -312,7 +332,7 @@
       freeLevel = (long) (0.95 * ramBufferSize);
     }
   }
-
+  
   synchronized double getRAMBufferSizeMB() {
     if (ramBufferSize == IndexWriter.DISABLE_AUTO_FLUSH) {
       return ramBufferSize;
@@ -320,6 +340,75 @@
       return ramBufferSize/1024./1024.;
     }
   }
+  
+  long getRamBufferAvailable() {
+    long v = ramBufferSize - numBytesUsed;
+    if (v < 0) return 0;
+    return v;
+  }
+  
+  long getTotalMax() {
+    return ramTotalMax;
+  }
+  
+  void setTotalMaxMB(double ramTotalMax) {
+    this.ramTotalMax = (long) (ramTotalMax*1024*1024);
+  }
+  
+  /**
+   * Attempt to grow the ram dir budget by the given number of bytes
+   * by taking unused space from the ram buffer; returns true on success.
+   * 
+   * The transfer is refused if the ram buffer is already at or below 20%
+   * of the total budget, so the ram dir never exceeds roughly 80% of it.
+   * @param value number of bytes to move from the ram buffer budget to the ram dir budget
+   * @return true if the ram dir budget was grown
+   */
+  synchronized boolean growRamDirMaxBy(long value) {
+    long ramBufferAvail = getRamBufferAvailable();
+    long minRamBuffer = (long)((double)ramTotalMax * 0.20); // keep at least 20% of the total budget for the ram buffer
+    if (minRamBuffer >= ramBufferSize) {
+      return false;
+    }
+    if (ramBufferAvail > value) {
+      ramBufferSize -= value;
+      ramDirSizeMax += value;
+      setRAMBufferSizeMB(ramBufferSize/1024./1024.);
+      return true;
+    } else return false;
+  }
+  
+  /**
+   * Attempt to grow the ram buffer by the given number of bytes by
+   * taking unused space from the ram dir budget.
+   * 
+   * @param value number of bytes to move from the ram dir budget to the ram buffer budget
+   * @return true if the ram buffer was grown, false otherwise
+   */
+  synchronized boolean growRamBufferBy(long value) {
+    long ramDirAvail = getRamDirAvailable();
+    if (ramDirAvail > value) {
+      ramDirSizeMax -= value;
+      ramBufferSize += value;
+      setRAMBufferSizeMB(ramBufferSize/1024./1024.);
+      return true;
+    } else {
+      return false;
+    }
+  }
+  
+  long getRamDirAvailable() {
+    long v = ramDirSizeMax - ramDirSize;
+    if (v < 0) return 0;
+    return v;
+  }
+  
+  double getRAMDirSizeMB() {
+    if (ramDirSizeMax == IndexWriter.DISABLE_AUTO_FLUSH) {
+      return ramDirSizeMax;
+    } else {
+      return ramDirSizeMax/1024./1024.;
+    }
+  }
 
   /** Set max buffered docs, which means we will flush by
    *  doc count instead of by RAM usage. */
@@ -330,7 +419,15 @@
   int getMaxBufferedDocs() {
     return maxBufferedDocs;
   }
-
+  
+  long getRamDirSizeMax() {
+    return ramDirSizeMax;
+  }
+  
+  long getRamBufferSize() {
+    return ramBufferSize;
+  }
+  
   /** Get current segment name we are writing. */
   String getSegment() {
     return segment;
@@ -367,7 +464,7 @@
     boolean success = false;
 
     try {
-      initFlushState(true);
+      initFlushState(true, writer.directory); // doc stores always write to the primary directory
       closedFiles.clear();
 
       consumer.closeDocStore(flushState);
@@ -534,11 +631,11 @@
     return true;
   }
 
-  synchronized private void initFlushState(boolean onlyDocStore) {
+  synchronized private void initFlushState(boolean onlyDocStore, Directory directory) {
     initSegmentName(onlyDocStore);
     flushState = new SegmentWriteState(this, directory, segment, docStoreSegment, numDocsInRAM, numDocsInStore, writer.getTermIndexInterval());
   }
-
+  
   /** Flush all pending docs to a new segment */
   synchronized int flush(boolean closeDocStore) throws IOException {
 
@@ -549,9 +646,10 @@
     assert nextDocID == numDocsInRAM;
     assert waitQueue.numWaiting == 0;
     assert waitQueue.waitingBytes == 0;
+    
+    assert flushToDir != null;
+    initFlushState(false, flushToDir);
 
-    initFlushState(false);
-
     docStoreOffset = numDocsInStore;
 
     if (infoStream != null)
@@ -574,7 +672,7 @@
       consumer.flush(threads, flushState);
 
       if (infoStream != null) {
-        final long newSegmentSize = segmentSize(flushState.segmentName);
+        final long newSegmentSize = segmentSize(flushState.segmentName, flushToDir);
         String message = "  oldRAMSize=" + numBytesUsed +
           " newFlushedSize=" + newSegmentSize +
           " docs/MB=" + nf.format(numDocsInRAM/(newSegmentSize/1024./1024.)) +
@@ -600,8 +698,9 @@
   }
 
   /** Build compound file for the segment we just flushed */
-  void createCompoundFile(String segment) throws IOException {
-    
+  void createCompoundFile(String segment, Directory directory) throws IOException {
+    // compound files are only built for segments flushed to the primary (writer) directory
+    assert directory == writer.directory;
     CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
     Iterator it = flushState.flushedFiles.iterator();
     while(it.hasNext())
@@ -623,9 +722,10 @@
       return true;
     }
   }
-
+  
   synchronized void clearFlushPending() {
     flushPending = false;
+    flushToDir = null;
   }
 
   synchronized void pushDeletes() {
@@ -763,7 +863,7 @@
       // This call is not synchronized and does all the
       // work
       final DocWriter perDoc = state.consumer.processDocument();
-        
+
       // This call is synchronized but fast
       finishDocument(state, perDoc);
       success = true;
@@ -914,11 +1014,13 @@
 
     int docStart = 0;
     boolean any = false;
+    Directory switchDir = writer.getInternalDirectory();
+    Directory directory = writer.directory;
     for (int i = 0; i < infosEnd; i++) {
 
       // Make sure we never attempt to apply deletes to
       // segment in external dir
-      assert infos.info(i).dir == directory;
+      assert infos.info(i).dir == directory || infos.info(i).dir == switchDir;
 
       SegmentReader reader = writer.readerPool.get(infos.info(i), false);
       try {
@@ -1018,16 +1120,16 @@
   synchronized private void addDeleteQuery(Query query, int docID) {
     deletesInRAM.queries.put(query, new Integer(flushedDocCount + docID));
   }
-
-  synchronized boolean doBalanceRAM() {
+  
+  synchronized boolean doBalanceRAM() {    
     return ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH && !bufferIsFull && (numBytesUsed >= ramBufferSize || numBytesAlloc >= freeTrigger);
   }
-
+  
   /** Does the synchronized work to finish/flush the
    *  inverted document. */
   private void finishDocument(DocumentsWriterThreadState perThread, DocWriter docWriter) throws IOException {
-
-    if (doBalanceRAM())
+    // if we can't grow the ram buffer any more then we need to balance the ram
+    if (doBalanceRAM() || (docWriter != null && !growRamBufferBy((long)((double)docWriter.sizeInBytes()*1.05))))
       // Must call this w/o holding synchronized(this) else
       // we'll hit deadlock:
       balanceRAM();
@@ -1111,10 +1213,9 @@
 
   // TODO FI: this is not flexible -- we can't hardwire
   // extensions in here:
-  private long segmentSize(String segmentName) throws IOException {
+  private long segmentSize(String segmentName, Directory directory) throws IOException {
     // Used only when infoStream != null
     assert infoStream != null;
-    
     long size = directory.fileLength(segmentName + ".tii") +
       directory.fileLength(segmentName + ".tis") +
       directory.fileLength(segmentName + ".frq") +
@@ -1123,10 +1224,15 @@
     final String normFileName = segmentName + ".nrm";
     if (directory.fileExists(normFileName))
       size += directory.fileLength(normFileName);
-
+    
     return size;
   }
-
+  
+  public long fileLength(String name) throws IOException {
+    Directory dir = writer.getInternalDirectory();
+    return dir.fileLength(name);
+  }
+  
   // Coarse estimates used to measure RAM usage of buffered deletes
   final static int OBJECT_HEADER_BYTES = 8;
   final static int POINTER_NUM_BYTE = 4;
@@ -1280,13 +1386,14 @@
     if (numBytesAlloc > freeTrigger) {
 
       if (infoStream != null)
         message("  RAM: now balance allocations: usedMB=" + toMB(numBytesUsed) +
                 " vs trigger=" + toMB(flushTrigger) +
                 " allocMB=" + toMB(numBytesAlloc) +
                 " vs trigger=" + toMB(freeTrigger) +
                 " byteBlockFree=" + toMB(byteBlockAllocator.freeByteBlocks.size()*BYTE_BLOCK_SIZE) +
                 " charBlockFree=" + toMB(freeCharBlocks.size()*CHAR_BLOCK_SIZE*CHAR_NUM_BYTE));
-
+      printRamDirUsage();
       final long startBytesAlloc = numBytesAlloc;
 
       int iter = 0;
@@ -1352,13 +1459,34 @@
             message("  RAM: now flush @ usedMB=" + nf.format(numBytesUsed/1024./1024.) +
                     " allocMB=" + nf.format(numBytesAlloc/1024./1024.) +
                     " triggerMB=" + nf.format(flushTrigger/1024./1024.));
-
+          printRamDirUsage();
           bufferIsFull = true;
         }
       }
     }
   }
-
+  
+  void printRamDirUsage() {
+    printRamDirUsage("");
+  }
+  
+  void printRamDirUsage(String msg) {
+    if (infoStream != null && writer.ramNrt) {
+      String percentStr = "dir/buf ("+nf.format(100.0*getRamDirSizeMax() / getTotalMax())+"%/"+
+      nf.format(100.0*getRamBufferSize() / getTotalMax())+"%)";
+      
+      String ramPer = nf.format(100.0*getRamDirSize() / getRamDirSizeMax())+"%";
+      String bufPer = nf.format(100.0*numBytesUsed / getRamBufferSize())+"%";
+      String prefix = "";
+      if (msg != null && !msg.equals("")) {
+        prefix = msg+" ";
+      }
+      message(prefix+percentStr+"  ramDir("+toMB(getRamDirSize())+"/"+toMB(getRamDirSizeMax())+")"+ramPer+
+          " buf("+toMB(numBytesUsed)+"/"+toMB(getRamBufferSize())+")"+bufPer);
+    }
+  }
+  
   final WaitQueue waitQueue = new WaitQueue();
 
   private class WaitQueue {
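A standalone sketch (not part of the patch) of the RAM budgeting that the new DocumentsWriter methods above implement: a single total budget (ramTotalMax) is split between the indexing ram buffer and the ram dir, and unused bytes can be shifted between the two sides. It mirrors the arithmetic of growRamDirMaxBy/growRamBufferBy, including the 20% floor kept for the ram buffer; the class name, field names, and initial values are illustrative only.

    /** Illustrative model of the ram buffer / ram dir budget split used in LUCENE-1313. */
    public class RamBudgetSketch {
      long ramTotalMax = 16L << 20;   // total budget, e.g. 16 MB
      long ramBufferSize = 8L << 20;  // half for buffered docs
      long ramDirSizeMax = 8L << 20;  // half for flushed RAM segments
      long numBytesUsed = 0;          // bytes currently used by the ram buffer
      long ramDirSize = 0;            // bytes currently used by the ram dir

      /** Move unused ram-buffer bytes to the ram-dir budget, keeping the buffer above 20% of total. */
      boolean growRamDirMaxBy(long value) {
        long minRamBuffer = (long) (ramTotalMax * 0.20);
        if (minRamBuffer >= ramBufferSize)
          return false;                                   // buffer is already at its floor
        long bufferAvail = Math.max(0, ramBufferSize - numBytesUsed);
        if (bufferAvail <= value)
          return false;                                   // not enough unused buffer space
        ramBufferSize -= value;
        ramDirSizeMax += value;
        return true;
      }

      /** Move unused ram-dir bytes back to the ram-buffer budget. */
      boolean growRamBufferBy(long value) {
        long dirAvail = Math.max(0, ramDirSizeMax - ramDirSize);
        if (dirAvail <= value)
          return false;
        ramDirSizeMax -= value;
        ramBufferSize += value;
        return true;
      }
    }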
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 776405)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -28,6 +28,8 @@
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.BufferedIndexInput;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.store.FileSwitchDirectory;
 import org.apache.lucene.util.Constants;
 
 import java.io.File;
@@ -286,6 +288,18 @@
    * set (see {@link #setInfoStream}).
    */
   public final static int MAX_TERM_LENGTH = DocumentsWriter.MAX_TERM_LENGTH;
+  
+  /**
+   * File extensions (stored fields and term vectors) that the
+   * FileSwitchDirectory always writes to the primary directory when
+   * NRT is on; all other files go to the RAMDirectory.
+   */
+  public final static Set SWITCH_FILE_EXTS = new HashSet();
+  static {
+    SWITCH_FILE_EXTS.add("fdx");
+    SWITCH_FILE_EXTS.add("fdt");
+    SWITCH_FILE_EXTS.add("tvx");
+    SWITCH_FILE_EXTS.add("tvf");
+    SWITCH_FILE_EXTS.add("tvd");
+  }
 
   /**
    * Default for {@link #getMaxSyncPauseSeconds}.  On
@@ -314,7 +328,7 @@
   private int messageID = -1;
   volatile private boolean hitOOM;
 
-  private Directory directory;  // where this index resides
+  Directory directory;  // where this index resides
   private Analyzer analyzer;    // how to analyze text
 
   private Similarity similarity = Similarity.getDefault(); // how to normalize
@@ -335,10 +349,10 @@
 
   private SegmentInfos segmentInfos = new SegmentInfos();       // the segments
 
-  private DocumentsWriter docWriter;
+  DocumentsWriter docWriter;
   private IndexFileDeleter deleter;
 
-  private Set segmentsToOptimize = new HashSet();           // used by optimize to note those needing optimization
+  Set segmentsToOptimize = new HashSet();           // used by optimize to note those needing optimization
 
   private Lock writeLock;
 
@@ -352,8 +366,10 @@
   // merges
   private HashSet mergingSegments = new HashSet();
 
-  private MergePolicy mergePolicy = new LogByteSizeMergePolicy();
-  private MergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
+  MergePolicy mergePolicy = new LogByteSizeMergePolicy();
+  RAMMergePolicy ramMergePolicy = new RAMMergePolicy();
+  private NRTMergePolicy nrtMergePolicy;
+  MergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
   private LinkedList pendingMerges = new LinkedList();
   private Set runningMerges = new HashSet();
   private List mergeExceptions = new ArrayList();
@@ -370,6 +386,7 @@
   private Thread writeThread;                     // non-null if any thread holds write lock
   final ReaderPool readerPool = new ReaderPool();
   private int upgradeCount;
+  FileSwitchDirectory switchDirectory;
   
   // This is a "write once" variable (like the organic dye
   // on a DVD-R that may or may not be heated by a laser and
@@ -382,6 +399,8 @@
   // readers.
   private volatile boolean poolReaders;
   
+  boolean ramNrt;
+  
   /**
    * Expert: returns a readonly reader containing all
    * current updates.  Flush is called automatically.  This
@@ -393,7 +412,7 @@
    * guarantee on how quickly you can get a new reader after
    * making changes with IndexWriter.  You'll have to
    * experiment in your situation to determine if it's
-   * faster enough.  As this is a new and experimental
+   * fast enough.  As this is a new and experimental
    * feature, please report back on your findings so we can
    * learn, improve and iterate.</p>
    *
@@ -441,7 +460,9 @@
     // this method is called:
     poolReaders = true;
 
-    flush(true, true, true);
+    // when getReader is called we assume we want to try to flush to ram
+    // (we may not be able to flush to ram as there may not be enough available ram)
+    flush(true, true, true, true);
     
     // Prevent segmentInfos from changing while opening the
     // reader; in theory we could do similar retry logic,
@@ -450,7 +471,31 @@
       return new ReadOnlyMultiSegmentReader(this, segmentInfos);
     }
   }
-
+  
+  /**
+   * Returns a reader over only the segments currently residing in the ram directory (switchDirectory).
+   */
+  // for testing
+  IndexReader getRamReader() throws IOException {
+    flush(true, true, true, true);
+    
+    synchronized(this) {
+      return new ReadOnlyMultiSegmentReader(this, getRamDirSegmentInfos());
+    }
+  }
+  
+  /**
+   * Primary directory segments reader
+   */
+  //for testing
+  IndexReader getPrimaryReader() throws IOException {
+    flush(true, true, true, true);
+    
+    synchronized(this) {
+      return new ReadOnlyMultiSegmentReader(this, getInfosByDir(segmentInfos, directory));
+    }
+  }
+  
   /** Holds shared SegmentReader instances. IndexWriter uses
    *  SegmentReaders for 1) applying deletes, 2) doing
    *  merges, 3) handing out a real-time reader.  This pool
@@ -461,7 +506,7 @@
   class ReaderPool {
 
     private final Map readerMap = new HashMap();
-
+    
     /** Forcefully clear changes for the specifed segments,
      *  and remove from the pool.   This is called on succesful merge. */
     synchronized void clear(SegmentInfos infos) throws IOException {
@@ -680,7 +725,16 @@
     assert readCount >= 0;
     notifyAll();
   }
-
+  
+  boolean isRamMerge(MergePolicy.OneMerge merge) {
+    return ramNrt && merge.directory == switchDirectory;
+  }
+  
   synchronized final boolean isOpen(boolean includePendingClose) {
     return !(closed || (includePendingClose && closing));
   }
@@ -725,11 +779,16 @@
    * an exception if the mergePolicy is not a LogMergePolicy.
    */
   private LogMergePolicy getLogMergePolicy() {
-    if (mergePolicy instanceof LogMergePolicy)
-      return (LogMergePolicy) mergePolicy;
+    MergePolicy mp = mergePolicy;
+    if (mp instanceof LogMergePolicy)
+      return (LogMergePolicy) mp;
     else
       throw new IllegalArgumentException("this method can only be called when the merge policy is the default LogMergePolicy");
   }
+  
+  public MergePolicy getRAMMergePolicy() {
+    return ramMergePolicy;
+  }
 
   /** <p>Get the current setting of whether newly flushed
    *  segments will use the compound file format.  Note that
@@ -849,11 +908,43 @@
        throws CorruptIndexException, LockObtainFailedException, IOException {
     init(FSDirectory.getDirectory(path), a, create, true, null, false, mfl.getLimit(), null, null);
   }
-
+  
   /**
    * Constructs an IndexWriter for the index in <code>path</code>.
    * Text will be analyzed with <code>a</code>.  If <code>create</code>
    * is true, then a new, empty index will be created in
+   * <code>path</code>, replacing the index already there,
+   * if any.
+   *
+   * <p><b>NOTE</b>: autoCommit (see <a
+   * href="#autoCommit">above</a>) is set to false with this
+   * constructor.
+   *
+   * @param path the path to the index directory
+   * @param a the analyzer to use
+   * @param create <code>true</code> to create the index or overwrite
+   *  the existing one; <code>false</code> to append to the existing
+   *  index
+   * @param mfl Maximum field length in number of tokens/terms: LIMITED, UNLIMITED, or user-specified
+   *   via the MaxFieldLength constructor.
+   * @param flushToRam If many small segments will be flushed and opened for near
+   *   realtime reading (see {@link #getReader}), set this to <code>true</code> to
+   *   flush new segments to a RAMDirectory first
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws LockObtainFailedException if another writer
+   *  has this index open (<code>write.lock</code> could not
+   *  be obtained)
+   * @throws IOException if the directory cannot be read/written to, or
+   *  if it does not exist and <code>create</code> is
+   *  <code>false</code> or if there is any other low-level
+   *  IO error
+   */
+  public IndexWriter(String path, Analyzer a, boolean create, MaxFieldLength mfl, boolean flushToRam)
+       throws CorruptIndexException, LockObtainFailedException, IOException {
+    init(FSDirectory.getDirectory(path), a, create, true, null, false, mfl.getLimit(), null, null, flushToRam, null);
+  }
+  
+  /**
+   * Constructs an IndexWriter for the index in <code>path</code>.
+   * Text will be analyzed with <code>a</code>.  If <code>create</code>
+   * is true, then a new, empty index will be created in
    * <code>path</code>, replacing the index already there, if any.
    *
    * @param path the path to the index directory
@@ -969,18 +1060,25 @@
        throws CorruptIndexException, LockObtainFailedException, IOException {
     init(d, a, create, false, null, false, mfl.getLimit(), null, null);
   }
-
+  
   /**
    * Constructs an IndexWriter for the index in <code>d</code>.
    * Text will be analyzed with <code>a</code>.  If <code>create</code>
    * is true, then a new, empty index will be created in
    * <code>d</code>, replacing the index already there, if any.
    *
+   * <p><b>NOTE</b>: autoCommit (see <a
+   * href="#autoCommit">above</a>) is set to false with this
+   * constructor.
+   *
    * @param d the index directory
    * @param a the analyzer to use
    * @param create <code>true</code> to create the index or overwrite
    *  the existing one; <code>false</code> to append to the existing
    *  index
+   * @param mfl Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
+   *   via the MaxFieldLength constructor.
+   * @param flushToRam If many small segments will be flushed and opened for near realtime reading (see {@link #getReader}), set this to <code>true</code>
    * @throws CorruptIndexException if the index is corrupt
    * @throws LockObtainFailedException if another writer
    *  has this index open (<code>write.lock</code> could not
@@ -989,6 +1087,31 @@
    *  if it does not exist and <code>create</code> is
    *  <code>false</code> or if there is any other low-level
    *  IO error
+   */
+  public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl, boolean flushToRam)
+       throws CorruptIndexException, LockObtainFailedException, IOException {
+    init(d, a, create, false, null, false, mfl.getLimit(), null, null, flushToRam, null);
+  }
+  
+  /**
+   * Constructs an IndexWriter for the index in <code>d</code>.
+   * Text will be analyzed with <code>a</code>.  If <code>create</code>
+   * is true, then a new, empty index will be created in
+   * <code>d</code>, replacing the index already there, if any.
+   *
+   * @param d the index directory
+   * @param a the analyzer to use
+   * @param create <code>true</code> to create the index or overwrite
+   *  the existing one; <code>false</code> to append to the existing
+   *  index
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws LockObtainFailedException if another writer
+   *  has this index open (<code>write.lock</code> could not
+   *  be obtained)
+   * @throws IOException if the directory cannot be read/written to, or
+   *  if it does not exist and <code>create</code> is
+   *  <code>false</code> or if there is any other low-level
+   *  IO error
    * @deprecated This constructor will be removed in the 3.0
    *  release, and call {@link #commit()} when needed.
    *  Use {@link #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} instead.
@@ -1099,7 +1222,7 @@
     throws CorruptIndexException, LockObtainFailedException, IOException {
     init(FSDirectory.getDirectory(path), a, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
   }
-
+  
   /**
    * Constructs an IndexWriter for the index in
    * <code>d</code>, first creating it if it does not
@@ -1126,15 +1249,22 @@
     throws CorruptIndexException, LockObtainFailedException, IOException {
     init(d, a, false, null, false, mfl.getLimit(), null, null);
   }
-
+  
   /**
    * Constructs an IndexWriter for the index in
    * <code>d</code>, first creating it if it does not
    * already exist.  Text will be analyzed with
    * <code>a</code>.
    *
+   * <p><b>NOTE</b>: autoCommit (see <a
+   * href="#autoCommit">above</a>) is set to false with this
+   * constructor.
+   *
    * @param d the index directory
    * @param a the analyzer to use
+   * @param mfl Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
+   *   via the MaxFieldLength constructor.
+   * @param flushToRam If many small segments will be flushed and opened for near realtime reading (see {@link #getReader}), set this to <code>true</code>
    * @throws CorruptIndexException if the index is corrupt
    * @throws LockObtainFailedException if another writer
    *  has this index open (<code>write.lock</code> could not
@@ -1142,6 +1272,32 @@
    * @throws IOException if the directory cannot be
    *  read/written to or if there is any other low-level
    *  IO error
+   */
+  public IndexWriter(Directory d, Analyzer a, MaxFieldLength mfl, boolean flushToRam)
+    throws CorruptIndexException, LockObtainFailedException, IOException {
+    init(d, a, false, null, false, mfl.getLimit(), null, null, flushToRam);
+  }
+  
+  IndexWriter(Directory d, Analyzer a, MaxFieldLength mfl, boolean flushToRam, Directory ramDir)
+    throws CorruptIndexException, LockObtainFailedException, IOException {
+    init(d, a, false, null, false, mfl.getLimit(), null, null, flushToRam, ramDir);
+  }
+  
+  /**
+   * Constructs an IndexWriter for the index in
+   * <code>d</code>, first creating it if it does not
+   * already exist.  Text will be analyzed with
+   * <code>a</code>.
+   *
+   * @param d the index directory
+   * @param a the analyzer to use
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws LockObtainFailedException if another writer
+   *  has this index open (<code>write.lock</code> could not
+   *  be obtained)
+   * @throws IOException if the directory cannot be
+   *  read/written to or if there is any other low-level
+   *  IO error
    * @deprecated This constructor will be removed in the 3.0 release.
    *  Use {@link
    *  #IndexWriter(Directory,Analyzer,MaxFieldLength)}
@@ -1261,7 +1417,7 @@
     throws CorruptIndexException, LockObtainFailedException, IOException {
     init(d, a, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
   }
-  
+
   /**
    * Expert: constructs an IndexWriter with a custom {@link
    * IndexDeletionPolicy}, for the index in <code>d</code>.
@@ -1294,7 +1450,7 @@
        throws CorruptIndexException, LockObtainFailedException, IOException {
     init(d, a, create, false, deletionPolicy, false, mfl.getLimit(), null, null);
   }
-  
+
   /**
    * Expert: constructs an IndexWriter with a custom {@link
    * IndexDeletionPolicy} and {@link IndexingChain}, 
@@ -1331,7 +1487,7 @@
        throws CorruptIndexException, LockObtainFailedException, IOException {
     init(d, a, create, false, deletionPolicy, false, mfl.getLimit(), indexingChain, commit);
   }
-  
+
   /**
    * Expert: constructs an IndexWriter with a custom {@link
    * IndexDeletionPolicy}, for the index in <code>d</code>.
@@ -1405,27 +1561,52 @@
        throws CorruptIndexException, LockObtainFailedException, IOException {
     init(d, a, false, false, deletionPolicy, false, mfl.getLimit(), null, commit);
   }
-
+  
   private void init(Directory d, Analyzer a, boolean closeDir, IndexDeletionPolicy deletionPolicy, 
-                    boolean autoCommit, int maxFieldLength, IndexingChain indexingChain, IndexCommit commit)
+      boolean autoCommit, int maxFieldLength, IndexingChain indexingChain, IndexCommit commit)
     throws CorruptIndexException, LockObtainFailedException, IOException {
+    init(d, a, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit, false);
+  }
+  
+  private void init(Directory d, Analyzer a, boolean closeDir, IndexDeletionPolicy deletionPolicy, 
+      boolean autoCommit, int maxFieldLength, IndexingChain indexingChain, IndexCommit commit, boolean flushToRam)
+    throws CorruptIndexException, LockObtainFailedException, IOException {
+    init(d, a, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit, flushToRam, (Directory)null);
+  }
+  
+  private void init(Directory d, Analyzer a, boolean closeDir, IndexDeletionPolicy deletionPolicy, 
+                    boolean autoCommit, int maxFieldLength, IndexingChain indexingChain, IndexCommit commit, boolean flushToRam, Directory ramDir)
+    throws CorruptIndexException, LockObtainFailedException, IOException {
     if (IndexReader.indexExists(d)) {
-      init(d, a, false, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit);
+      init(d, a, false, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit, flushToRam, ramDir);
     } else {
-      init(d, a, true, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit);
+      init(d, a, true, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit, flushToRam, ramDir);
     }
   }
-
+  
   private void init(Directory d, Analyzer a, final boolean create, boolean closeDir, 
+      IndexDeletionPolicy deletionPolicy, boolean autoCommit, int maxFieldLength,
+      IndexingChain indexingChain, IndexCommit commit) throws CorruptIndexException, LockObtainFailedException, IOException {
+    init(d, a, create, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit, false, (Directory)null);
+  }
+  
+  private void init(Directory d, Analyzer a, final boolean create, boolean closeDir, 
                     IndexDeletionPolicy deletionPolicy, boolean autoCommit, int maxFieldLength,
-                    IndexingChain indexingChain, IndexCommit commit)
+                    IndexingChain indexingChain, IndexCommit commit, boolean ramNrt, Directory ramDir)
     throws CorruptIndexException, LockObtainFailedException, IOException {
     this.closeDir = closeDir;
     directory = d;
+    this.ramNrt = ramNrt;
+    if (ramDir == null) ramDir = new RAMDirectory();
+    this.switchDirectory = new FileSwitchDirectory(SWITCH_FILE_EXTS, directory, ramDir, false);
     analyzer = a;
     setMessageID(defaultInfoStream);
     this.maxFieldLength = maxFieldLength;
 
+    if (ramNrt) {
+      nrtMergePolicy = new NRTMergePolicy(this);
+    }
+    
     if (indexingChain == null)
       indexingChain = DocumentsWriter.DefaultIndexingChain;
 
@@ -1493,13 +1674,15 @@
       this.autoCommit = autoCommit;
       setRollbackSegmentInfos(segmentInfos);
 
-      docWriter = new DocumentsWriter(directory, this, indexingChain);
+      docWriter = new DocumentsWriter(this, indexingChain);
       docWriter.setInfoStream(infoStream);
       docWriter.setMaxFieldLength(maxFieldLength);
-
+      if (ramNrt) {
+        setRAMBufferSizeMB(IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB); // initialize ram dir size
+      }
       // Default deleter (for backwards compatibility) is
       // KeepOnlyLastCommitDeleter:
-      deleter = new IndexFileDeleter(directory,
+      deleter = new IndexFileDeleter(switchDirectory, 
                                      deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy,
                                      segmentInfos, infoStream, docWriter);
 
@@ -1523,20 +1706,61 @@
       throw e;
     }
   }
-
+  
+  Directory getInternalDirectory() {
+    return ramNrt ? switchDirectory : directory;
+  }
+  
+  synchronized long getRamDirSize() {
+    return ((RAMDirectory)switchDirectory.getSecondaryDir()).sizeInBytes();
+  }
+  
+  static long size(Directory dir) throws IOException {
+    String[] files = dir.listAll();
+    long c = 0;
+    for (int x=0; x < files.length; x++) {
+      c += dir.fileLength(files[x]);
+    }
+    return c;
+  }
+  
+  public Directory getRAMDirectory() {
+    return switchDirectory.getSecondaryDir();
+  }
+  
   private synchronized void setRollbackSegmentInfos(SegmentInfos infos) {
     rollbackSegmentInfos = (SegmentInfos) infos.clone();
-    assert !rollbackSegmentInfos.hasExternalSegments(directory);
+    assert !rollbackSegmentInfos.hasExternalSegments(getInternalDirectory());
     rollbackSegments = new HashMap();
     final int size = rollbackSegmentInfos.size();
     for(int i=0;i<size;i++)
       rollbackSegments.put(rollbackSegmentInfos.info(i), new Integer(i));
   }
-
+  
   /**
+   * Expert: set the ram directory merge policy used by this writer.
+   */
+  public void setRAMMergePolicy(RAMMergePolicy mp) {
+    ensureOpen();
+    if (mp == null)
+      throw new NullPointerException("RAMMergePolicy must be non-null");
+    if (ramMergePolicy != null && ramMergePolicy != mp)
+      ramMergePolicy.close();
+    ramMergePolicy = mp;
+    // what to do with this setting when RAM NRT is on?
+    //pushMaxBufferedDocs();
+    if (infoStream != null)
+      message("setRAMMergePolicy " + mp);
+  }
+  
+  /**
    * Expert: set the merge policy used by this writer.
    */
   public void setMergePolicy(MergePolicy mp) {
+    // TODO: set this to be NRTMergePolicy friendly
     ensureOpen();
     if (mp == null)
       throw new NullPointerException("MergePolicy must be non-null");
@@ -1564,8 +1788,7 @@
   synchronized public void setMergeScheduler(MergeScheduler mergeScheduler) throws CorruptIndexException, IOException {
     ensureOpen();
     if (mergeScheduler == null)
-      throw new NullPointerException("MergeScheduler must be non-null");
-
+      throw new NullPointerException("MergeScheduler must be non-null");  
     if (this.mergeScheduler != mergeScheduler) {
       finishMerges(true);
       this.mergeScheduler.close();
@@ -1574,9 +1797,9 @@
     if (infoStream != null)
       message("setMergeScheduler " + mergeScheduler);
   }
-
+  
   /**
-   * Expert: returns the current MergePolicy in use by this
+   * Expert: returns the current MergeScheduler in use by this
    * writer.
-   * @see #setMergePolicy
+   * @see #setMergeScheduler
    */
@@ -1584,7 +1807,7 @@
     ensureOpen();
     return mergeScheduler;
   }
-
+  
   /** <p>Determines the largest segment (measured by
    * document count) that may be merged with other segments.
    * Small values (e.g., less than 10,000) are best for
@@ -1746,11 +1969,22 @@
     if (mb == DISABLE_AUTO_FLUSH && getMaxBufferedDocs() == DISABLE_AUTO_FLUSH)
       throw new IllegalArgumentException(
           "at least one of ramBufferSize and maxBufferedDocs must be enabled");
-    docWriter.setRAMBufferSizeMB(mb);
+    
+    if (ramNrt) {
+      // if NRT is on, then set the ramBufferSize to half
+      // set the ramDirSize to the other half
+      double ramMB = mb * 0.5;
+      docWriter.setTotalMaxMB(mb);
+      docWriter.setRAMBufferSizeMB(ramMB);
+      docWriter.setRAMDirSizeMB(ramMB);
+    } else {
+      docWriter.setRAMBufferSizeMB(mb);
+      docWriter.setTotalMaxMB(mb);
+    }
     if (infoStream != null)
       message("setRAMBufferSizeMB " + mb);
   }
-
+  
   /**
    * Returns the value set by {@link #setRAMBufferSizeMB} if enabled.
    */
@@ -2064,9 +2298,10 @@
       if (waitForMerges)
         // Give merge scheduler last chance to run, in case
         // any pending merges are waiting:
-        mergeScheduler.merge(this);
+        scheduleMerge();
 
       mergePolicy.close();
+      if (ramMergePolicy != null) ramMergePolicy.close();
 
       finishMerges(waitForMerges);
 
@@ -2088,9 +2323,12 @@
         deleter.close();
       }
       
-      if (closeDir)
+      if (closeDir) {
         directory.close();
-
+        Directory ramDir = switchDirectory.getSecondaryDir(); 
+        ramDir.close();
+      }
+      
       if (writeLock != null) {
         writeLock.release();                          // release write lock
         writeLock = null;
@@ -2118,7 +2356,7 @@
    *  doc stores (stored fields & vectors files).
    *  Return value specifices whether new doc store files are compound or not.
    */
-  private synchronized boolean flushDocStores() throws IOException {
+  private synchronized boolean flushDocStores(Directory dir) throws IOException {
 
     boolean useCompoundDocStore = false;
 
@@ -2134,8 +2372,8 @@
       }
     }
 
-    useCompoundDocStore = mergePolicy.useCompoundDocStore(segmentInfos);
-      
+    useCompoundDocStore = getCurrentMergePolicy().useCompoundDocStore(segmentInfos, dir);
+    // NOTE: the policy's answer is deliberately overridden for now; compound doc stores are disabled in this patch
+    useCompoundDocStore = false;
     if (useCompoundDocStore && docStoreSegment != null && docWriter.closedFiles().size() != 0) {
       // Now build compound doc store file
 
@@ -2149,7 +2387,7 @@
       final String compoundFileName = docStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION;
 
       try {
-        CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName);
+        CompoundFileWriter cfsWriter = new CompoundFileWriter(dir, compoundFileName);
         final Iterator it = docWriter.closedFiles().iterator();
         while(it.hasNext())
           cfsWriter.addFile((String) it.next());
@@ -2531,7 +2769,24 @@
       handleOOM(oom, "updateDocument");
     }
   }
-
+  
+  //for test purpose
+  final synchronized SegmentInfos getSegmentInfos() {
+    return segmentInfos;
+  }
+  
+  //for test purpose
+  final synchronized SegmentInfos getSegmentInfos(Directory dir) {
+    SegmentInfos dirInfos = new SegmentInfos();
+    for (int x=0; x < segmentInfos.size(); x++) {
+      SegmentInfo info = segmentInfos.info(x);
+      if (info.dir == dir) {
+        dirInfos.add(info);
+      }
+    }
+    return dirInfos;
+  }
+  
   // for test purpose
   final synchronized int getSegmentCount(){
     return segmentInfos.size();
@@ -2690,7 +2945,7 @@
     if (infoStream != null)
       message("optimize: index now " + segString());
 
-    flush(true, false, true);
+    flush(true, false, true, true); // if there's room, flush the buffer to the ramdir
 
     synchronized(this) {
       resetMergeExceptions();
@@ -2706,6 +2961,7 @@
         final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next();
         merge.optimize = true;
         merge.maxNumSegmentsOptimize = maxNumSegments;
+        merge.directory = directory;
       }
 
       it = runningMerges.iterator();
@@ -2713,6 +2969,7 @@
         final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next();
         merge.optimize = true;
         merge.maxNumSegmentsOptimize = maxNumSegments;
+        merge.directory = directory;
       }
     }
 
@@ -2733,7 +2990,7 @@
             for(int i=0;i<size;i++) {
               final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) mergeExceptions.get(0);
               if (merge.optimize) {
-                IOException err = new IOException("background merge hit exception: " + merge.segString(directory));
+                IOException err = new IOException("background merge hit exception: " + merge.segString());
                 final Throwable t = merge.getException();
                 if (t != null)
                   err.initCause(t);
@@ -2777,6 +3034,15 @@
     return false;
   }
 
+  MergePolicy getCurrentMergePolicy() {
+    if (ramNrt) assert nrtMergePolicy != null;
+    if (nrtMergePolicy != null) {
+      assert ramNrt;
+      return nrtMergePolicy;
+    }
+    return mergePolicy;
+  }
+  
   /** Just like {@link #expungeDeletes()}, except you can
    *  specify whether the call should block until the
    *  operation completes.  This is only meaningful with a
@@ -2797,7 +3063,7 @@
     MergePolicy.MergeSpecification spec;
 
     synchronized(this) {
-      spec = mergePolicy.findMergesToExpungeDeletes(segmentInfos, this);
+      spec = getCurrentMergePolicy().findMergesToExpungeDeletes(segmentInfos, this);
       if (spec != null) {
         final int numMerges = spec.merges.size();
         for(int i=0;i<numMerges;i++)
@@ -2805,7 +3071,7 @@
       }
     }
 
-    mergeScheduler.merge(this);
+    scheduleMerge();
 
     if (spec != null && doWait) {
       final int numMerges = spec.merges.size();
@@ -2827,7 +3093,7 @@
               running = true;
             Throwable t = merge.getException();
             if (t != null) {
-              IOException ioe = new IOException("background merge hit exception: " + merge.segString(directory));
+              IOException ioe = new IOException("background merge hit exception: " + merge.segString());
               ioe.initCause(t);
               throw ioe;
             }
@@ -2892,12 +3158,73 @@
   private final void maybeMerge(boolean optimize) throws CorruptIndexException, IOException {
     maybeMerge(1, optimize);
   }
-
+  
+  long getRamBufferSizeLong() {
+    return (long)(getRAMBufferSizeMB()*(double)1024*(double)1024);
+  }
+  
   private final void maybeMerge(int maxNumSegmentsOptimize, boolean optimize) throws CorruptIndexException, IOException {
     updatePendingMerges(maxNumSegmentsOptimize, optimize);
+    scheduleMerge();
+  }
+  
+  /**
+   * Merge the ram segments to disk and optimize
+   * them to minimize future disk merges.
+   * 
+   * This method is asynchronous: it only registers the merges;
+   * the merge scheduler actually runs them.
+   * 
+   * @throws IOException
+   */
+  void mergeRamSegmentsToDir() throws IOException {
+    assert ramNrt && nrtMergePolicy != null;
+    boolean hasMerges = false;
+    synchronized (this) {
+      SegmentInfos ramInfos = getRamDirSegmentInfos();
+      if (ramInfos.size() > 0) {
+        segmentsToOptimize = new HashSet();
+        final int numSegments = ramInfos.size();
+        for(int i=0;i<numSegments;i++)
+          segmentsToOptimize.add(ramInfos.info(i));
+        MergePolicy.MergeSpecification spec = nrtMergePolicy.findMergesForOptimize(ramInfos,
+            this,
+            1,
+            segmentsToOptimize);
+        if (spec != null) {
+          final int numMerges = spec.merges.size();
+          for(int i=0;i<numMerges;i++) {
+            MergePolicy.OneMerge merge = (MergePolicy.OneMerge) spec.merges.get(i);
+            merge.directory = directory; // set the destination directory
+            if (registerMerge(merge)) {
+              hasMerges = true;
+            }
+          }
+        }
+      }
+    }
+    if (hasMerges) {
+      scheduleMerge();
+    }
+  }
+  
+  void scheduleMerge() throws IOException {
     mergeScheduler.merge(this);
   }
-
+  
+  private static boolean allInfosFromDir(MergePolicy.MergeSpecification spec, Directory dir) {
+    SegmentInfos infos = getInfos(spec);
+    for (int x=0; x < infos.size(); x++) {
+      SegmentInfo info = infos.info(x);
+      if (info.dir != dir) return false;
+    }
+    return true;
+  }
+  
+  private static SegmentInfos getInfos(MergePolicy.MergeSpecification spec) {
+    SegmentInfos infos = new SegmentInfos();
+    for (int x=0; x < spec.merges.size(); x++) {
+      MergePolicy.OneMerge merge = (MergePolicy.OneMerge)spec.merges.get(x);
+      infos.addAll(merge.segments);
+    }
+    return infos;
+  }
+  
   private synchronized void updatePendingMerges(int maxNumSegmentsOptimize, boolean optimize)
     throws CorruptIndexException, IOException {
     assert !optimize || maxNumSegmentsOptimize > 0;
@@ -2912,24 +3239,27 @@
 
     final MergePolicy.MergeSpecification spec;
     if (optimize) {
-      spec = mergePolicy.findMergesForOptimize(segmentInfos, this, maxNumSegmentsOptimize, segmentsToOptimize);
-
+      spec = getCurrentMergePolicy().findMergesForOptimize(segmentInfos, this, maxNumSegmentsOptimize, segmentsToOptimize);
       if (spec != null) {
         final int numMerges = spec.merges.size();
         for(int i=0;i<numMerges;i++) {
           final MergePolicy.OneMerge merge = ((MergePolicy.OneMerge) spec.merges.get(i));
           merge.optimize = true;
           merge.maxNumSegmentsOptimize = maxNumSegmentsOptimize;
+          merge.directory = directory;
         }
       }
 
     } else
-      spec = mergePolicy.findMerges(segmentInfos, this);
+      spec = getCurrentMergePolicy().findMerges(segmentInfos, this);
 
     if (spec != null) {
       final int numMerges = spec.merges.size();
-      for(int i=0;i<numMerges;i++)
-        registerMerge((MergePolicy.OneMerge) spec.merges.get(i));
+      for(int i=0;i<numMerges;i++) {
+        MergePolicy.OneMerge merge = (MergePolicy.OneMerge) spec.merges.get(i);
+        if (merge.directory == null) merge.directory = directory; // set the destination directory
+        registerMerge(merge);
+      }
     }
   }
 
@@ -2946,7 +3276,27 @@
       return merge;
     }
   }
+  
+  synchronized MergePolicy.OneMerge getNextMerge(Directory directory) {
+    if (pendingMerges.size() == 0)
+      return null;
+    else {
+      Iterator it = pendingMerges.iterator();
+      while(it.hasNext()) {
+        MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next();
+        if (merge.directory == directory) {
+          // Advance the merge from pending to running
+          it.remove();
+          runningMerges.add(merge);
+          return merge;
+        }
+      }
 
+      // No pending merge targets the given directory
+      return null;
+    }
+  }
+
   /** Like getNextMerge() except only returns a merge if it's
    *  external. */
   private synchronized MergePolicy.OneMerge getNextExternalMerge() {
@@ -3249,7 +3599,7 @@
       while(it.hasNext()) {
         final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next();
         if (infoStream != null)
-          message("now abort pending merge " + merge.segString(directory));
+          message("now abort pending merge " + merge.segString());
         merge.abort();
         mergeFinish(merge);
       }
@@ -3259,7 +3609,7 @@
       while(it.hasNext()) {
         final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next();
         if (infoStream != null)
-          message("now abort running merge " + merge.segString(directory));
+          message("now abort running merge " + merge.segString());
         merge.abort();
       }
 
@@ -3552,9 +3902,83 @@
   }
 
   private boolean hasExternalSegments() {
-    return segmentInfos.hasExternalSegments(directory);
+    return segmentInfos.hasExternalSegments(getInternalDirectory());
   }
+  
+  /**
+   * Like resolveExternalSegments: merges all ram dir segments into a
+   * single segment in the primary directory, running the merges in the
+   * current thread.
+   * @throws CorruptIndexException
+   * @throws IOException
+   */
+  void resolveRamSegments() throws CorruptIndexException, IOException {
+    boolean any = false;
 
+    boolean done = false;
+    while(!done) {
+      MergePolicy.OneMerge merge = null;
+      MergePolicy.MergeSpecification spec = null;
+      
+      synchronized(this) {
+
+        if (stopMerges)
+          throw new MergePolicy.MergeAbortedException("rollback() was called or addIndexes* hit an unhandled exception");
+
+        SegmentInfos ramInfos = getRamDirSegmentInfos();
+        segmentsToOptimize = new HashSet();
+        for (int x=0; x < ramInfos.size(); x++)
+          segmentsToOptimize.add(ramInfos.info(x));
+        spec = ramMergePolicy.findMergesForOptimize(ramInfos, this, 1, segmentsToOptimize);
+        done = true;
+        if (spec == null) continue;
+        for(int i=0; i < spec.merges.size(); i++) {
+          final MergePolicy.OneMerge newMerge = (MergePolicy.OneMerge)spec.merges.get(i);
+          done = false;
+          newMerge.directory = directory; 
+          // Returns true if no running merge conflicts
+          // with this one (and, records this merge as
+          // pending), ie, this segment is not currently
+          // being merged:
+          if (registerMerge(newMerge)) {
+            merge = newMerge;
+
+            // If this segment is not currently being
+            // merged, then advance it to running & run
+            // the merge ourself (below):
+            pendingMerges.remove(merge);
+            runningMerges.add(merge);
+            break;
+          }
+        }
+
+        if (!done && merge == null)
+          // We are not yet done (external segments still
+          // exist in segmentInfos), yet, all such segments
+          // are currently "covered" by a pending or running
+          // merge.  We now try to grab any pending merge
+          // that involves external segments:
+          merge = getNextExternalMerge();
+
+        if (!done && merge == null)
+          // We are not yet done, and, all external segments
+          // fall under merges that the merge scheduler is
+          // currently running.  So, we now wait and check
+          // back to see if the merge has completed.
+          doWait();
+      }
+
+      if (merge != null) {
+        any = true;
+        merge(merge);
+      }
+    }
+
+    if (any)
+      // Sometimes, on copying an external segment over,
+      // more merges may become necessary:
+      scheduleMerge();
+  }
+  
   /* If any of our segments are using a directory != ours
    * then we have to either copy them over one by one, merge
    * them (if merge policy has chosen to) or wait until
@@ -3583,8 +4007,8 @@
           info = segmentInfos.info(i);
           if (info.dir != directory) {
             done = false;
-            final MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(segmentInfos.range(i, 1+i), info.getUseCompoundFile());
-
+            final MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(segmentInfos.range(i, 1+i), info.getUseCompoundFile(), this);
+            newMerge.directory = directory; 
             // Returns true if no running merge conflicts
             // with this one (and, records this merge as
             // pending), ie, this segment is not currently
@@ -3627,9 +4051,9 @@
     if (any)
       // Sometimes, on copying an external segment over,
       // more merges may become necessary:
-      mergeScheduler.merge(this);
+      scheduleMerge();
   }
-
+  
   /** Merges the provided indexes into this index.
    * <p>After this completes, the index is optimized. </p>
    * <p>The provided IndexReaders are not closed.</p>
@@ -3873,7 +4297,6 @@
   }
 
   private final void prepareCommit(String commitUserData, boolean internal) throws CorruptIndexException, IOException {
-
     if (hitOOM) {
       throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot commit");
     }
@@ -3886,9 +4309,10 @@
 
     if (infoStream != null)
       message("prepareCommit: flush");
-
-    flush(true, true, true);
-
+    
+    flush(true, true, true, false);
+    if (ramNrt) {
+      // merge any ram segments to disk synchronously before committing
+      resolveRamSegments();
+    }
     startCommit(0, commitUserData);
   }
 
@@ -3983,9 +4407,38 @@
     if (infoStream != null)
       message("commit: done");
   }
-
+  
+  public static SegmentInfos getInfosByDir(SegmentInfos infos, Directory dir) throws IOException {
+    SegmentInfos dirInfos = new SegmentInfos();
+    for (int x=0; x < infos.size(); x++) {
+      SegmentInfo info = infos.info(x);
+      if (info.dir == dir) {
+        dirInfos.add(info);
+      }
+    }
+    return dirInfos;
+  }
+  
+  synchronized SegmentInfos getRamDirSegmentInfos() throws IOException {
+    return getInfosByDir(segmentInfos, switchDirectory);
+  }
+  
   /**
-   * Flush all in-memory buffered udpates (adds and deletes)
+   * Flush all in-memory buffered updates (adds and deletes)
+   * to the Directory.  When NRT is enabled, new segments are flushed
+   * to the ram dir by default.
+   * @param triggerMerge if true, we may merge segments (if
+   *  deletes or docs were flushed) if necessary
+   * @param flushDocStores if false we are allowed to keep
+   *  doc stores open to share with the next segment
+   * @param flushDeletes whether pending deletes should also
+   *  be flushed
+   */
+  protected final void flush(boolean triggerMerge, boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
+    flush(triggerMerge, flushDocStores, flushDeletes, ramNrt);
+  }
+  
+  /**
+   * Flush all in-memory buffered updates (adds and deletes)
    * to the Directory.
    * @param triggerMerge if true, we may merge segments (if
    *  deletes or docs were flushed) if necessary
@@ -3994,19 +4447,21 @@
    * @param flushDeletes whether pending deletes should also
    *  be flushed
    */
-  protected final void flush(boolean triggerMerge, boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
-    // We can be called during close, when closing==true, so we must pass false to ensureOpen:
+  protected final void flush(boolean triggerMerge, boolean flushDocStores, boolean flushDeletes, boolean flushToRam) throws CorruptIndexException, IOException {
+    // We can be called during close, when closing==true, so we must pass false to ensureOpen:
     ensureOpen(false);
-    if (doFlush(flushDocStores, flushDeletes) && triggerMerge)
+    
+    if (doFlush(flushDocStores, flushDeletes, flushToRam) && triggerMerge) {
+      message("flush maybe merge");
       maybeMerge();
+    }
   }
 
   // TODO: this method should not have to be entirely
   // synchronized, ie, merges should be allowed to commit
   // even while a flush is happening
-  private synchronized final boolean doFlush(boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
+  private synchronized final boolean doFlush(boolean flushDocStores, boolean flushDeletes, boolean flushToRam) throws CorruptIndexException, IOException {
     try {
-      return doFlushInternal(flushDocStores, flushDeletes);
+      return doFlushInternal(flushDocStores, flushDeletes, flushToRam);
     } finally {
       docWriter.clearFlushPending();
     }
@@ -4015,7 +4470,7 @@
   // TODO: this method should not have to be entirely
   // synchronized, ie, merges should be allowed to commit
   // even while a flush is happening
-  private synchronized final boolean doFlushInternal(boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
+  private synchronized final boolean doFlushInternal(boolean flushDocStores, boolean flushDeletes, boolean flushToRam) throws CorruptIndexException, IOException {
 
     if (hitOOM) {
       throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush");
@@ -4026,7 +4481,7 @@
     assert testPoint("startDoFlush");
 
     flushCount++;
-
+    
     flushDeletes |= docWriter.deletesFull();
 
     // When autoCommit=true we must always flush deletes
@@ -4048,10 +4503,24 @@
       SegmentInfo newSegment = null;
 
       final int numDocs = docWriter.getNumDocsInRAM();
-
       // Always flush docs if there are any
       boolean flushDocs = numDocs > 0;
-
+      
+      // if RAM NRT is enabled, pick the directory to flush to based on
+      // the amount of ram dir space available
+      if (ramNrt && flushToRam) {
+        long bytesUsed = docWriter.numBytesUsed;
+        docWriter.growRamDirMaxBy(bytesUsed);
+        long ramDirAvail = docWriter.getRamDirAvailable();
+        if (bytesUsed >= ramDirAvail) { // no space in the ram dir
+          docWriter.flushToDir = directory;
+        } else {
+          docWriter.flushToDir = switchDirectory;
+        }
+      } else {
+        docWriter.flushToDir = directory;
+      }
+      
       // With autoCommit=true we always must flush the doc
       // stores when we flush
       flushDocStores |= autoCommit;
@@ -4090,7 +4559,7 @@
         if (infoStream != null)
           message("  flush shared docStore segment " + docStoreSegment);
       
-        docStoreIsCompoundFile = flushDocStores();
+        docStoreIsCompoundFile = flushDocStores(docWriter.flushToDir);
         flushDocStores = false;
       }
 
@@ -4106,6 +4575,9 @@
 
         try {
           flushedDocCount = docWriter.flush(flushDocStores);
+          if (ramNrt) {
+            docWriter.pushRamDirSize();
+          }
           success = true;
         } finally {
           if (!success) {
@@ -4129,9 +4601,13 @@
         // Create new SegmentInfo, but do not add to our
         // segmentInfos until deletes are flushed
         // successfully.
+        assert docWriter.flushToDir != null;
+        if (docWriter.flushToDir == switchDirectory) {
+          assert !docStoreIsCompoundFile;
+        }
         newSegment = new SegmentInfo(segment,
                                      flushedDocCount,
-                                     directory, false, true,
+                                     docWriter.flushToDir, false, true,
                                      docStoreOffset, docStoreSegment,
                                      docStoreIsCompoundFile,    
                                      docWriter.hasProx());
@@ -4145,18 +4621,19 @@
       if (flushDeletes) {
         flushDeletesCount++;
         applyDeletes();
+        docWriter.pushRamDirSize();
       }
       
-      doAfterFlush();
+      doAfterFlush(); // TODO: should pushRamDirSize be called here instead?
 
       if (flushDocs)
         checkpoint();
 
-      if (flushDocs && mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
+      if (flushDocs && getCurrentMergePolicy().useCompoundFile(segmentInfos, newSegment)) {
         // Now build compound file
         boolean success = false;
         try {
-          docWriter.createCompoundFile(segment);
+          docWriter.createCompoundFile(segment, directory);
           success = true;
         } finally {
           if (!success) {
@@ -4169,7 +4646,16 @@
         newSegment.setUseCompoundFile(true);
         checkpoint();
       }
-
+      
+      // we need to see if the ram dir is over its limit;
+      // if it is, we return true here.
+      // TODO: should we have some standard checking method in docWriter for this?
+      if (ramNrt && docWriter.getRamDirSize() >= docWriter.getRamBufferAvailable()) {
+        docWriter.printRamDirUsage("after flush ram dir over");
+        //message("ramdir overlimit ("+docWriter.toMB(docWriter.getRamDirSize())+"/"+docWriter.toMB(docWriter.getRamDirSizeMax())+")");
+        return true;
+      }
+      
       return flushDocs;
 
     } catch (OutOfMemoryError oom) {
@@ -4184,11 +4670,12 @@
   /** Expert:  Return the total size of all index files currently cached in memory.
    * Useful for size management with flushRamDocs()
    */
+  // TODO: do we want this to include the ram dir size?
   public final long ramSizeInBytes() {
     ensureOpen();
     return docWriter.getRAMUsed();
   }
-
+  
   /** Expert:  Return the number of documents currently
    *  buffered in RAM. */
   public final synchronized int numRamDocs() {
@@ -4200,20 +4687,20 @@
 
     int first = segmentInfos.indexOf(merge.segments.info(0));
     if (first == -1)
-      throw new MergePolicy.MergeException("could not find segment " + merge.segments.info(0).name + " in current index " + segString(), directory);
+      throw new MergePolicy.MergeException("could not find segment " + merge.segments.info(0).name + " in current index " + segString(), merge.directory);
 
     final int numSegments = segmentInfos.size();
     
     final int numSegmentsToMerge = merge.segments.size();
     for(int i=0;i<numSegmentsToMerge;i++) {
       final SegmentInfo info = merge.segments.info(i);
-
+      
       if (first + i >= numSegments || !segmentInfos.info(first+i).equals(info)) {
         if (segmentInfos.indexOf(info) == -1)
           throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.name + ") that is not in the current index " + segString(), directory);
         else
-          throw new MergePolicy.MergeException("MergePolicy selected non-contiguous segments to merge (" + merge.segString(directory) + " vs " + segString() + "), which IndexWriter (currently) cannot handle",
-                                               directory);
+          throw new MergePolicy.MergeException("MergePolicy selected non-contiguous segments to merge (" + merge.segString() + " vs " + segString() + "), which IndexWriter (currently) cannot handle",
+                                                          directory);
       }
     }
 
@@ -4236,7 +4723,7 @@
     final SegmentInfos sourceSegments = merge.segments;
 
     if (infoStream != null)
-      message("commitMergeDeletes " + merge.segString(directory));
+      message("commitMergeDeletes " + merge.segString());
 
     // Carefully merge deletes that occurred after we
     // started merging:
@@ -4305,7 +4792,7 @@
     }
 
     if (infoStream != null)
-      message("commitMerge: " + merge.segString(directory) + " index=" + segString());
+      message("commitMerge: " + merge.segString() + " index=" + segString());
 
     assert merge.registerDone;
 
@@ -4317,7 +4804,7 @@
     // abort this merge
     if (merge.isAborted()) {
       if (infoStream != null)
-        message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");
+        message("commitMerge: skipping merge " + merge.segString() + ": it was aborted");
 
       deleter.refresh(merge.info.name);
       return false;
@@ -4375,7 +4862,7 @@
   final private void handleMergeException(Throwable t, MergePolicy.OneMerge merge) throws IOException {
 
     if (infoStream != null) {
-      message("handleMergeException: merge=" + merge.segString(directory) + " exc=" + t);
+      message("handleMergeException: merge=" + merge.segString() + " exc=" + t);
     }
 
     // Set the exception on the merge, so if
@@ -4413,15 +4900,16 @@
     throws CorruptIndexException, IOException {
 
     boolean success = false;
-
+    if (ramNrt && merge.directory == switchDirectory) {
+      assert !merge.useCompoundFile;
+    }
     try {
       try {
         try {
           mergeInit(merge);
 
           if (infoStream != null)
-            message("now merge\n  merge=" + merge.segString(directory) + "\n  merge=" + merge + "\n  index=" + segString());
-
+            message("now merge\n  merge=" + merge.segString() + "\n  merge=" + merge + "\n  index=" + segString());
           mergeMiddle(merge);
           success = true;
         } catch (Throwable t) {
@@ -4436,8 +4924,11 @@
               message("hit exception during merge");
             if (merge.info != null && !segmentInfos.contains(merge.info))
               deleter.refresh(merge.info.name);
+          } else {
+            // if the merge succeeded then push the ram dir size
+            docWriter.pushRamDirSize();
           }
-
+           
           // This merge (and, generally, any change to the
           // segments) may now enable new merges, so we call
           // merge policy & update pending merges.
@@ -4463,9 +4954,9 @@
 
     if (stopMerges) {
       merge.abort();
-      throw new MergePolicy.MergeAbortedException("merge is aborted: " + merge.segString(directory));
+      throw new MergePolicy.MergeAbortedException("merge is aborted: " + merge.segString());
     }
-
+    
     final int count = merge.segments.size();
     boolean isExternal = false;
     for(int i=0;i<count;i++) {
@@ -4474,16 +4965,20 @@
         return false;
       if (segmentInfos.indexOf(info) == -1)
         return false;
-      if (info.dir != directory)
+      if (info.dir != directory && info.dir != switchDirectory)
         isExternal = true;
     }
 
     ensureContiguousMerge(merge);
-
+    
+    if (ramNrt && merge.directory == switchDirectory) {
+      assert !merge.useCompoundFile;
+    }
+    
     pendingMerges.add(merge);
 
     if (infoStream != null)
-      message("add merge to pendingMerges: " + merge.segString(directory) + " [total " + pendingMerges.size() + " pending]");
+      message("add merge to pendingMerges: " + merge.segString() + " [total " + pendingMerges.size() + " pending]");
 
     merge.mergeGen = mergeGen;
     merge.isExternal = isExternal;
@@ -4628,30 +5123,51 @@
       // make compound file out of them...
       if (infoStream != null)
         message("now flush at merge");
-      doFlush(true, false);
+      doFlush(true, false, false);
     }
 
     merge.increfDone = true;
 
     merge.mergeDocStores = mergeDocStores;
-
+    
+    Directory dir = (merge.directory != null ? merge.directory : directory);
+    
+    //if (dir == switchDirectory) {
+    //  assert !docStoreIsCompoundFile;
+    //}
+    
     // Bind a new segment name here so even with
     // ConcurrentMergePolicy we keep deterministic segment
     // names.
     merge.info = new SegmentInfo(newSegmentName(), 0,
-                                 directory, false, true,
+                                 dir, false, true,
                                  docStoreOffset,
                                  docStoreSegment,
                                  docStoreIsCompoundFile,
                                  false);
-
+    
     // Also enroll the merged segment into mergingSegments;
     // this prevents it from getting selected for a merge
     // after our merge is done but while we are building the
     // CFS:
     mergingSegments.add(merge.info);
   }
-
+  
+  static List getSegmentNames(SegmentInfos infos) {
+    List list = new ArrayList();
+    for (int x=0; x < infos.size(); x++) {
+      list.add(infos.info(x).name);
+    }
+    return list;
+  }
+  
+  String printDir(Directory dir) {
+    if (dir == switchDirectory) return "sd";
+    if (dir == directory) return "d";
+    if (switchDirectory != null && dir == switchDirectory.getSecondaryDir()) return "rd";
+    return "unknown";
+  }
+  
   /** This is called after merging a segment and before
    *  building its CFS.  Return true if the files should be
    *  sync'd.  If you return false, then the source segment
@@ -4711,8 +5227,9 @@
       mergingSegments.remove(merge.info);
       merge.registerDone = false;
     }
-
+    
     runningMerges.remove(merge);
+    if (ramNrt) docWriter.printRamDirUsage("after merge");
   }
 
   /** Does the actual (time-consuming) work of the merge,
@@ -4721,7 +5238,7 @@
   final private int mergeMiddle(MergePolicy.OneMerge merge) 
     throws CorruptIndexException, IOException {
     
-    merge.checkAborted(directory);
+    merge.checkAborted(merge.directory);
 
     final String mergedName = merge.info.name;
     
@@ -4733,7 +5250,7 @@
     final int numSegments = sourceSegments.size();
 
     if (infoStream != null)
-      message("merging " + merge.segString(directory));
+      message("merging " + merge.segString());
 
     merger = new SegmentMerger(this, mergedName, merge);
 
@@ -4790,7 +5307,7 @@
           if (dss.contains(docWriter.getDocStoreSegment())) {
             if (infoStream != null)
               message("now flush at mergeMiddle");
-            doFlush(true, false);
+            doFlush(true, false, false);
           }
         }
 
@@ -5023,7 +5540,7 @@
         buffer.append(' ');
       }
       final SegmentInfo info = infos.info(i);
-      buffer.append(info.segString(directory));
+      buffer.append(info.segString(getInternalDirectory(), this));
       if (info.dir != directory)
         buffer.append("**");
     }
@@ -5208,10 +5725,10 @@
           deleter.incRef(toSync, false);
           myChangeCount = changeCount;
 
-          Iterator it = toSync.files(directory, false).iterator();
+          Iterator it = toSync.files(getInternalDirectory(), false).iterator();
           while(it.hasNext()) {
             String fileName = (String) it.next();
-            assert directory.fileExists(fileName): "file " + fileName + " does not exist";
+            assert getInternalDirectory().fileExists(fileName): "file " + fileName + " does not exist";
           }
 
         } finally {
@@ -5229,8 +5746,8 @@
         while(true) {
 
           final Collection pending = new ArrayList();
-
-          Iterator it = toSync.files(directory, false).iterator();
+          Directory dir = getInternalDirectory();
+          Iterator it = toSync.files(dir, false).iterator();
           while(it.hasNext()) {
             final String fileName = (String) it.next();
             if (startSync(fileName, pending)) {
@@ -5238,10 +5755,10 @@
               try {
                 // Because we incRef'd this commit point, above,
                 // the file had better exist:
-                assert directory.fileExists(fileName): "file '" + fileName + "' does not exist dir=" + directory;
+                assert dir.fileExists(fileName): "file '" + fileName + "' does not exist dir=" + dir;
                 if (infoStream != null)
                   message("now sync " + fileName);
-                directory.sync(fileName);
+                dir.sync(fileName);
                 success = true;
               } finally {
                 finishSync(fileName, success);
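
For illustration, a minimal sketch of how an application might drive the RAM NRT flush path changed above. The five-argument IndexWriter constructor and getReader() are taken from TestIndexWriterRamDir further down; the path, analyzer and field are placeholders, not part of this patch:

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.RAMDirectory;

    public class RamNrtSketch {
      public static void main(String[] args) throws Exception {
        Directory primary = FSDirectory.getDirectory("/tmp/ramnrt-sketch"); // placeholder path
        RAMDirectory ramDir = new RAMDirectory();
        // assumed signature: primary dir, analyzer, field length limit, create, ram dir
        IndexWriter writer = new IndexWriter(primary, new WhitespaceAnalyzer(),
            IndexWriter.MaxFieldLength.LIMITED, true, ramDir);

        Document doc = new Document();
        doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
        writer.addDocument(doc);
        writer.flush();                              // small segment lands in the ram dir

        IndexReader nrtReader = writer.getReader();  // sees ram + primary segments
        // ... search against nrtReader here ...
        nrtReader.close();

        writer.commit();                             // ram segments are resolved to disk
        writer.close();
        primary.close();
      }
    }
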
Index: src/java/org/apache/lucene/index/LogMergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/LogMergePolicy.java	(revision 776405)
+++ src/java/org/apache/lucene/index/LogMergePolicy.java	(working copy)
@@ -20,6 +20,8 @@
 import java.io.IOException;
 import java.util.Set;
 
+import org.apache.lucene.store.Directory;
+
 /** <p>This class implements a {@link MergePolicy} that tries
  *  to merge segments into levels of exponentially
  *  increasing size, where each level has < mergeFactor
@@ -60,17 +62,23 @@
   /* TODO 3.0: change this default to true */
   protected boolean calibrateSizeByDeletes = false;
   
-  private boolean useCompoundFile = true;
-  private boolean useCompoundDocStore = true;
-  private IndexWriter writer;
-
+  protected boolean useCompoundFile = true;
+  protected boolean useCompoundDocStore = true;
+  protected IndexWriter writer;
+  protected String type = ""; 
+  
+  public void setType(String type) {
+    this.type = type;
+  }
+  
   protected boolean verbose() {
     return writer != null && writer.verbose();
   }
   
-  private void message(String message) {
-    if (verbose())
-      writer.message("LMP: " + message);
+  protected void message(String message) {
+    if (verbose()) {
+      writer.message("LMP("+type+"): " + message);
+    }
   }
 
   /** <p>Returns the number of segments that are merged at
@@ -112,7 +120,12 @@
   public boolean getUseCompoundFile() {
     return useCompoundFile;
   }
-
+  
+  // Javadoc inherited
+  public boolean useCompoundDocStore(SegmentInfos infos, Directory dir) {
+    return useCompoundDocStore;
+  }
+  
   // Javadoc inherited
   public boolean useCompoundDocStore(SegmentInfos infos) {
     return useCompoundDocStore;
@@ -223,12 +236,12 @@
 
       if (last > 0) {
 
-        spec = new MergeSpecification();
+        spec = new MergeSpecification(writer);
 
         // First, enroll all "full" merges (size
         // mergeFactor) to potentially be run concurrently:
         while (last - maxNumSegments + 1 >= mergeFactor) {
-          spec.add(new OneMerge(infos.range(last-mergeFactor, last), useCompoundFile));
+          spec.add(new OneMerge(infos.range(last-mergeFactor, last), useCompoundFile, writer));
           last -= mergeFactor;
         }
 
@@ -240,7 +253,7 @@
             // Since we must optimize down to 1 segment, the
             // choice is simple:
             if (last > 1 || !isOptimized(writer, infos.info(0)))
-              spec.add(new OneMerge(infos.range(0, last), useCompoundFile));
+              spec.add(new OneMerge(infos.range(0, last), useCompoundFile, writer));
           } else if (last > maxNumSegments) {
 
             // Take care to pick a partial merge that is
@@ -268,7 +281,7 @@
               }
             }
 
-            spec.add(new OneMerge(infos.range(bestStart, bestStart+finalMergeSize), useCompoundFile));
+            spec.add(new OneMerge(infos.range(bestStart, bestStart+finalMergeSize), useCompoundFile, writer));
           }
         }
         
@@ -296,7 +309,7 @@
     if (verbose())
       message("findMergesToExpungeDeletes: " + numSegments + " segments");
 
-    MergeSpecification spec = new MergeSpecification();
+    MergeSpecification spec = new MergeSpecification(writer);
     int firstSegmentWithDeletions = -1;
     for(int i=0;i<numSegments;i++) {
       final SegmentInfo info = segmentInfos.info(i);
@@ -310,7 +323,7 @@
           // deletions, so force a merge now:
           if (verbose())
             message("  add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
-          spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
+          spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile, writer));
           firstSegmentWithDeletions = i;
         }
       } else if (firstSegmentWithDeletions != -1) {
@@ -319,7 +332,7 @@
         // mergeFactor segments
         if (verbose())
           message("  add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
-        spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
+        spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile, writer));
         firstSegmentWithDeletions = -1;
       }
     }
@@ -327,7 +340,7 @@
     if (firstSegmentWithDeletions != -1) {
       if (verbose())
         message("  add merge " + firstSegmentWithDeletions + " to " + (numSegments-1) + " inclusive");
-      spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, numSegments), useCompoundFile));
+      spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, numSegments), useCompoundFile, writer));
     }
 
     return spec;
@@ -424,10 +437,10 @@
 
         if (!anyTooLarge) {
           if (spec == null)
-            spec = new MergeSpecification();
+            spec = new MergeSpecification(writer);
           if (verbose())
             message("    " + start + " to " + end + ": add this merge");
-          spec.add(new OneMerge(infos.range(start, end), useCompoundFile));
+          spec.add(new OneMerge(infos.range(start, end), useCompoundFile, writer));
         } else if (verbose())
           message("    " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
 
Index: src/java/org/apache/lucene/index/MergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/MergePolicy.java	(revision 776405)
+++ src/java/org/apache/lucene/index/MergePolicy.java	(working copy)
@@ -85,12 +85,15 @@
     final boolean useCompoundFile;
     boolean aborted;
     Throwable error;
+    Directory directory;
+    IndexWriter writer;
 
-    public OneMerge(SegmentInfos segments, boolean useCompoundFile) {
+    public OneMerge(SegmentInfos segments, boolean useCompoundFile, IndexWriter writer) {
       if (0 == segments.size())
         throw new RuntimeException("segments must include at least one segment");
       this.segments = segments;
       this.useCompoundFile = useCompoundFile;
+      this.writer = writer;
     }
 
     /** Record that an exception occurred while executing
@@ -119,15 +122,15 @@
 
     synchronized void checkAborted(Directory dir) throws MergeAbortedException {
       if (aborted)
-        throw new MergeAbortedException("merge is aborted: " + segString(dir));
+        throw new MergeAbortedException("merge is aborted: " + segString());
     }
-
-    String segString(Directory dir) {
+    
+    String segString() {
       StringBuffer b = new StringBuffer();
       final int numSegments = segments.size();
       for(int i=0;i<numSegments;i++) {
         if (i > 0) b.append(' ');
-        b.append(segments.info(i).segString(dir));
+        b.append(segments.info(i).segString(directory, writer));
       }
       if (info != null)
         b.append(" into ").append(info.name);
@@ -144,25 +147,43 @@
    */
 
   public static class MergeSpecification {
-
+    IndexWriter writer;
     /**
      * The subset of segments to be included in the primitive merge.
      */
 
     public List merges = new ArrayList();
-
+    
+    public MergeSpecification(IndexWriter writer) {
+      this.writer = writer;
+    }
+    
     public void add(OneMerge merge) {
       merges.add(merge);
     }
-
-    public String segString(Directory dir) {
+    
+    /**
+     * @deprecated use {@link #segString()} instead; each merge now carries its directory
+     * @param directory ignored
+     * @return a description of the merges in this specification
+     */
+    public String segString(Directory directory) {
       StringBuffer b = new StringBuffer();
       b.append("MergeSpec:\n");
       final int count = merges.size();
       for(int i=0;i<count;i++)
-        b.append("  ").append(1 + i).append(": ").append(((OneMerge) merges.get(i)).segString(dir));
+        b.append("  ").append(1 + i).append(": ").append(((OneMerge) merges.get(i)).segString());
       return b.toString();
     }
+    
+    public String segString() {
+      StringBuffer b = new StringBuffer();
+      b.append("MergeSpec:\n");
+      final int count = merges.size();
+      for(int i=0;i<count;i++)
+        b.append("  ").append(1 + i).append(": ").append(((OneMerge) merges.get(i)).segString());
+      return b.toString();
+    }
   }
 
   /** Exception thrown if there are any problems while
@@ -267,4 +288,10 @@
    * compound file format.
    */
   abstract boolean useCompoundDocStore(SegmentInfos segments);
+  
+  /**
+   * Returns true if the doc store files should use the
+   * compound file format.
+   */
+  abstract boolean useCompoundDocStore(SegmentInfos segments, Directory dir);
 }
Index: src/java/org/apache/lucene/index/MergeScheduler.java
===================================================================
--- src/java/org/apache/lucene/index/MergeScheduler.java	(revision 776405)
+++ src/java/org/apache/lucene/index/MergeScheduler.java	(working copy)
@@ -19,6 +19,8 @@
 
 import java.io.IOException;
 
+import org.apache.lucene.store.Directory;
+
 /** <p>Expert: {@link IndexWriter} uses an instance
  *  implementing this interface to execute the merges
  *  selected by a {@link MergePolicy}.  The default
@@ -35,7 +37,7 @@
 */
 
 public abstract class MergeScheduler {
-
+ 
   /** Run the merges provided by {@link IndexWriter#getNextMerge()}. */
   abstract void merge(IndexWriter writer)
     throws CorruptIndexException, IOException;
Index: src/java/org/apache/lucene/index/MultiSegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiSegmentReader.java	(revision 776405)
+++ src/java/org/apache/lucene/index/MultiSegmentReader.java	(working copy)
@@ -83,14 +83,16 @@
     // no need to process segments in reverse order
     final int numSegments = infos.size();
     SegmentReader[] readers = new SegmentReader[numSegments];
-    final Directory dir = writer.getDirectory();
+    final Directory flushDir = writer.getInternalDirectory();
     int upto = 0;
 
     for (int i=0;i<numSegments;i++) {
       boolean success = false;
       try {
         final SegmentInfo info = infos.info(upto);
-        if (info.dir == dir) {
+        // accept the regular and ram dirs 
+        // (not the external dir from addIndexesNoOptimize)
+        if (flushDir.contains(info.dir)) {
           readers[upto++] = writer.readerPool.getReadOnlyClone(info, true);
         }
         success = true;
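
Because the loop above now accepts any segment whose directory is contained in the writer's internal directory, a writer-backed reader spans both the primary and ram dirs. A small sketch of the three reader views exercised by TestIndexWriterRamDir below; getRamReader() and getPrimaryReader() are taken from that test:

    // Sketch: reader views over a RAM NRT writer.
    IndexReader all = writer.getReader();          // ram + primary segments
    IndexReader ram = writer.getRamReader();       // ram-dir segments only
    IndexReader prim = writer.getPrimaryReader();  // primary-dir segments only
    try {
      // the combined view should account for everything the partial views see
      assert all.maxDoc() == ram.maxDoc() + prim.maxDoc();
    } finally {
      all.close();
      ram.close();
      prim.close();
    }
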
Index: src/java/org/apache/lucene/index/NRTMergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/NRTMergePolicy.java	(revision 0)
+++ src/java/org/apache/lucene/index/NRTMergePolicy.java	(revision 0)
@@ -0,0 +1,188 @@
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.lucene.store.Directory;
+
+/**
+ * Merge policy that takes into account the regular IndexWriter merge policy
+ * (that operates on the primary dir) and the ram merge policy which determines
+ * merges for the ram directory.  
+ * 
+ * Optimize merges all segments (ram and primary dir) to the primary dir.
+ * 
+ * Expunge deletes removes deletes from both the ram dir and the primary dir.
+ * 
+ */
+public class NRTMergePolicy extends MergePolicy {
+  public static final int FIND_MERGES = 1;
+  public static final int FIND_MERGES_EXPUNGE_DELETES = 2;
+  public static final int FIND_MERGES_OPTIMIZE = 3;
+  IndexWriter writer;
+
+  public NRTMergePolicy(IndexWriter writer) {
+    this.writer = writer;
+  }
+  
+  public boolean useCompoundDocStore(SegmentInfos segments) {
+    throw new UnsupportedOperationException("use useCompoundDocStore(SegmentInfos, Directory) instead");
+  }
+  
+  /**
+   * If the newSegment is going to ram, get useCompoundFile from the ramMergePolicy
+   * otherwise use the mergePolicy
+   */
+  public boolean useCompoundFile(SegmentInfos segments, SegmentInfo newSegment) {
+    assert newSegment.dir != null;
+    if (newSegment.dir == writer.directory) {
+      return writer.mergePolicy.useCompoundFile(segments, newSegment);
+    } else if (newSegment.dir == writer.switchDirectory) {
+      return writer.ramMergePolicy.useCompoundFile(segments, newSegment);
+    }
+    throw new RuntimeException("unknown directory");
+  }
+
+  public boolean useCompoundDocStore(SegmentInfos segments, Directory dir) {
+    assert dir != null;
+    if (dir == writer.directory) {
+      return writer.mergePolicy.useCompoundDocStore(segments);
+    } else {
+      return writer.ramMergePolicy.useCompoundDocStore(segments);
+    }
+  }
+
+  public void close() {
+  }
+
+  public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos, IndexWriter writer, int maxSegmentCount, Set segmentsToOptimize)
+      throws CorruptIndexException, IOException {
+    return findMerges(segmentInfos, writer, FIND_MERGES_OPTIMIZE, maxSegmentCount);
+  }
+
+  public MergeSpecification findMerges(SegmentInfos segmentInfos, IndexWriter writer) throws CorruptIndexException, IOException {
+    return findMerges(segmentInfos, writer, FIND_MERGES, 0);
+  }
+  
+  private static boolean mergesCompound(MergeSpecification spec) {
+    if (spec.merges.size() == 0) return false;
+    for (int x=0; x < spec.merges.size(); x++) {
+      MergePolicy.OneMerge merge = (MergePolicy.OneMerge)spec.merges.get(x);
+      if (!merge.useCompoundFile) return false;
+    }
+    return true;
+  }
+  
+  protected MergeSpecification findMerges(SegmentInfos segmentInfos, IndexWriter writer, int type, int maxSegmentCount)
+      throws CorruptIndexException, IOException {
+    SegmentInfos primInfos = writer.getSegmentInfos(writer.directory);
+    SegmentInfos ramInfos = writer.getSegmentInfos(writer.getInternalDirectory()); 
+    assert !writer.ramMergePolicy.useCompoundDocStore;
+    assert !writer.ramMergePolicy.useCompoundFile;
+    MergeSpecification ramSpec = null;
+    // If we're not optimizing, we try to perform merges and
+    // write them to the ram dir; if the ram segments don't
+    // look like they will fit, we instead optimize them to
+    // disk.
+    if (type == FIND_MERGES)
+      ramSpec = writer.ramMergePolicy.findMerges(segmentInfos, writer);
+    else if (type == FIND_MERGES_EXPUNGE_DELETES)
+      ramSpec = writer.ramMergePolicy.findMergesToExpungeDeletes(segmentInfos, writer);
+    writer.ramMergePolicy.setType("ram");
+    ((LogMergePolicy)writer.mergePolicy).setType("disk");
+    if (ramSpec != null) {
+      assert !mergesCompound(ramSpec);
+      boolean ramOverLimit = !segmentsFitInRam(ramSpec, writer);
+      // if the ram mergespec could exceed the available ram then we need to
+      // ask for an optimized mergespec that goes to the primary dir
+      if (ramOverLimit) {
+        // don't overwrite the segmentsToOptimize if it's been written to
+        // we may want to change how segmentsToOptimize works where after
+        // it's not being used anymore, it's nulled
+        writer.segmentsToOptimize = new HashSet();
+        final int numSegments = ramInfos.size();
+        for (int i = 0; i < numSegments; i++)
+          writer.segmentsToOptimize.add(ramInfos.info(i));
+        ramSpec = writer.ramMergePolicy.findMergesForOptimize(ramInfos, writer, 1, writer.segmentsToOptimize);
+      } 
+      if (type == FIND_MERGES_OPTIMIZE || ramOverLimit)
+        // when optimizing we merge all segments to the primary directory
+        // or if we don't fit in ram
+        setDir(ramSpec, writer.getDirectory());
+      else
+        setDir(ramSpec, writer.getInternalDirectory());
+    }
+    MergeSpecification spec = null;
+    if (type == FIND_MERGES) {
+      spec = writer.mergePolicy.findMerges(primInfos, writer);
+      setDir(spec, writer.getDirectory());
+    } else if (type == FIND_MERGES_EXPUNGE_DELETES) {
+      spec = writer.mergePolicy.findMergesToExpungeDeletes(primInfos, writer);
+      setDir(spec, writer.getDirectory());
+    } else if (type == FIND_MERGES_OPTIMIZE) {      
+      spec = writer.mergePolicy.findMergesForOptimize(segmentInfos, writer, maxSegmentCount, writer.segmentsToOptimize);
+      setDir(spec, writer.getDirectory());
+    }
+    MergeSpecification ms = mergeSpecs(ramSpec, spec, writer);
+    return ms;
+  }
+  
+  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos, IndexWriter writer) throws CorruptIndexException,
+    IOException {
+    return findMerges(segmentInfos, writer, FIND_MERGES_EXPUNGE_DELETES, -1);
+  }
+  
+  private boolean segmentsFitInRam(MergeSpecification spec, IndexWriter writer) throws IOException {
+    if (writer.docWriter.getRAMDirSizeMB() == IndexWriter.DISABLE_AUTO_FLUSH) return false;
+    long total = 0;
+    for (int x = 0; x < spec.merges.size(); x++) {
+      MergePolicy.OneMerge merge = (MergePolicy.OneMerge) spec.merges.get(x);
+      total += getSize(merge);
+    }
+    writer.docWriter.growRamDirMaxBy(total);
+    long ramDirAvail = writer.docWriter.getRamDirAvailable();
+    String rdas = writer.docWriter.toMB(ramDirAvail);
+    String ts = writer.docWriter.toMB(total);
+    boolean b = total <= writer.docWriter.getRamDirAvailable();
+    message("fits in ram: "+b+" ramDirAvail:"+rdas+" segSize:"+ts);
+    return b;
+  }
+  
+  protected boolean verbose() {
+    return writer != null && writer.verbose();
+  }
+  
+  protected void message(String message) {
+    if (verbose()) {
+      writer.message("NRTMP: " + message);
+    }
+  }
+  
+  private static long getSize(MergePolicy.OneMerge merge) throws IOException {
+    long total = 0;
+    for (int x = 0; x < merge.segments.size(); x++) {
+      SegmentInfo si = (SegmentInfo) merge.segments.get(x);
+      total += si.sizeInBytes();
+    }
+    return total;
+  }
+
+  private static void setDir(MergeSpecification spec, Directory dir) {
+    if (spec != null) {
+      for (int x = 0; x < spec.merges.size(); x++) {
+        MergePolicy.OneMerge merge = (MergePolicy.OneMerge) spec.merges.get(x);
+        merge.directory = dir;
+      }
+    }
+  }
+
+  private static MergeSpecification mergeSpecs(MergeSpecification ms1, MergeSpecification ms2, IndexWriter writer) {
+    if (ms1 == null && ms2 == null) return null;
+    if (ms1 != null && ms2 != null) assert ms1.writer == ms2.writer;
+    MergeSpecification ms = new MergeSpecification(writer);
+    if (ms1 != null) ms.merges.addAll(ms1.merges);
+    if (ms2 != null) ms.merges.addAll(ms2.merges);
+    return ms;
+  }
+}
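
The directory routing in findMerges above reduces to: keep ram-dir merges in ram while their combined size fits the available ram, and send them to the primary dir when optimizing or when they would overflow. A sketch of that decision for a single OneMerge, using only calls that appear in this patch (the real code also grows the ram dir budget via growRamDirMaxBy before checking; "merge" and "optimizing" are assumed locals):

    // Sketch of the per-merge routing decision made by NRTMergePolicy.
    long mergeBytes = 0;
    for (int i = 0; i < merge.segments.size(); i++) {
      mergeBytes += merge.segments.info(i).sizeInBytes();
    }
    boolean fitsInRam = mergeBytes <= writer.docWriter.getRamDirAvailable();
    if (optimizing || !fitsInRam) {
      merge.directory = writer.getDirectory();          // spill to the primary dir
    } else {
      merge.directory = writer.getInternalDirectory();  // keep the merge in the ram dir
    }
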
Index: src/java/org/apache/lucene/index/RAMMergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/RAMMergePolicy.java	(revision 0)
+++ src/java/org/apache/lucene/index/RAMMergePolicy.java	(revision 0)
@@ -0,0 +1,25 @@
+package org.apache.lucene.index;
+
+/**
+ * Merge policy for merging IndexWriter ram segments.
+ */
+public class RAMMergePolicy extends LogDocMergePolicy {
+  
+  public RAMMergePolicy() {
+    super();
+    useCompoundFile = false;
+    useCompoundDocStore = false;
+  }
+  
+  protected long size(SegmentInfo info) {
+    return info.docCount;
+  }
+  
+  public boolean getUseCompoundFile() {
+    return false;
+  }
+  
+  public boolean useCompoundDocStore(SegmentInfos infos) {
+    return false;
+  }
+}
Index: src/java/org/apache/lucene/index/SegmentInfo.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfo.java	(revision 776405)
+++ src/java/org/apache/lucene/index/SegmentInfo.java	(working copy)
@@ -78,7 +78,7 @@
                                                   // (if it's an older index)
 
   private boolean hasProx;                        // True if this segment has any fields with omitTermFreqAndPositions==false
-
+  
   public String toString() {
     return "si: "+dir.toString()+" "+name+" docCount: "+docCount+" delCount: "+delCount+" delFileName: "+getDelFileName();
   }
@@ -650,7 +650,7 @@
   }
 
   /** Used for debugging */
-  public String segString(Directory dir) {
+  public String segString(Directory dir, IndexWriter writer) {
     String cfs;
     try {
       if (getUseCompoundFile())
@@ -669,8 +669,8 @@
       docStore = "";
 
     return name + ":" +
-      cfs +
-      (this.dir == dir ? "" : "x") +
+      cfs + "." +
+      writer.printDir(dir) +
       docCount + docStore;
   }
 
Index: src/java/org/apache/lucene/index/SegmentInfos.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfos.java	(revision 776405)
+++ src/java/org/apache/lucene/index/SegmentInfos.java	(working copy)
@@ -812,13 +812,13 @@
     final int size = size();
     for(int i=0;i<size;i++) {
       final SegmentInfo info = info(i);
-      if (info.dir == dir) {
+      if (dir.contains(info.dir)) {
         files.addAll(info(i).files());
       }
     }
     return files;
   }
-
+  
   public final void finishCommit(Directory dir) throws IOException {
     if (pendingSegnOutput == null)
       throw new IllegalStateException("prepareCommit was not called");
@@ -884,7 +884,7 @@
     finishCommit(dir);
   }
 
-  synchronized String segString(Directory directory) {
+  synchronized String segString(Directory directory, IndexWriter writer) {
     StringBuffer buffer = new StringBuffer();
     final int count = size();
     for(int i = 0; i < count; i++) {
@@ -892,7 +892,7 @@
         buffer.append(' ');
       }
       final SegmentInfo info = info(i);
-      buffer.append(info.segString(directory));
+      buffer.append(info.segString(directory, writer));
       if (info.dir != directory)
         buffer.append("**");
     }
@@ -920,9 +920,11 @@
   // Used only for testing
   boolean hasExternalSegments(Directory dir) {
     final int numSegments = size();
-    for(int i=0;i<numSegments;i++)
-      if (info(i).dir != dir)
+    for(int i=0;i<numSegments;i++) {
+      if (!dir.contains(info(i).dir)) {
         return true;
+      }
+    }
     return false;
   }
 }
Index: src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentMerger.java	(revision 776405)
+++ src/java/org/apache/lucene/index/SegmentMerger.java	(working copy)
@@ -78,7 +78,8 @@
   }
 
   SegmentMerger(IndexWriter writer, String name, MergePolicy.OneMerge merge) {
-    directory = writer.getDirectory();
+    if (merge != null) directory = merge.directory;
+    if (directory == null) directory = writer.directory;
     segment = name;
     if (merge != null)
       checkAbort = new CheckAbort(merge, directory);
@@ -690,7 +691,7 @@
     public void work(double units) throws MergePolicy.MergeAbortedException {
       workCount += units;
       if (workCount >= 10000.0) {
-        merge.checkAborted(dir);
+        merge.checkAborted(merge.directory);
         workCount = 0;
       }
     }
Index: src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentReader.java	(revision 776405)
+++ src/java/org/apache/lucene/index/SegmentReader.java	(working copy)
@@ -77,7 +77,7 @@
   // closed.  A given insance of SegmentReader may be
   // closed, even those it shares core objects with other
   // SegmentReaders:
-  private Ref coreRef = new Ref();
+  Ref coreRef = new Ref();
 
   // Compound File Reader when based on a compound file segment
   CompoundFileReader cfsReader = null;
@@ -474,6 +474,8 @@
       final Directory storeDir;
       if (si.getDocStoreOffset() != -1) {
         if (si.getDocStoreIsCompoundFile()) {
+          assert storeCFSReader == null;
+          
           storeCFSReader = new CompoundFileReader(directory(),
                                                   si.getDocStoreSegment() + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION,
                                                   readBufferSize);
@@ -788,9 +790,7 @@
     while (it.hasNext()) {
       ((Norm) it.next()).decRef();
     }
-
     if (coreRef.decRef() == 0) {
-
       // close everything, nothing is shared anymore with other readers
       if (tis != null) {
         tis.close();
@@ -806,7 +806,7 @@
   
       if (fieldsReaderOrig != null)
         fieldsReaderOrig.close();
-  
+
       if (cfsReader != null)
         cfsReader.close();
   
Index: src/java/org/apache/lucene/index/SerialMergeScheduler.java
===================================================================
--- src/java/org/apache/lucene/index/SerialMergeScheduler.java	(revision 776405)
+++ src/java/org/apache/lucene/index/SerialMergeScheduler.java	(working copy)
@@ -19,16 +19,17 @@
 
 import java.io.IOException;
 
+import org.apache.lucene.store.Directory;
+
 /** A {@link MergeScheduler} that simply does each merge
  *  sequentially, using the current thread. */
-public class SerialMergeScheduler extends MergeScheduler {
-
+public class SerialMergeScheduler extends MergeScheduler {  
   /** Just do the merges in sequence. We do this
    * "synchronized" so that even if the application is using
    * multiple threads, only one merge may run at a time. */
   synchronized public void merge(IndexWriter writer)
     throws CorruptIndexException, IOException {
-
+        
     while(true) {
       MergePolicy.OneMerge merge = writer.getNextMerge();
       if (merge == null)
@@ -36,6 +37,6 @@
       writer.merge(merge);
     }
   }
-
+  
   public void close() {}
 }
Index: src/java/org/apache/lucene/index/StoredFieldsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/StoredFieldsWriter.java	(revision 776405)
+++ src/java/org/apache/lucene/index/StoredFieldsWriter.java	(working copy)
@@ -20,6 +20,7 @@
 import java.util.Map;
 import java.io.IOException;
 import org.apache.lucene.store.RAMOutputStream;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.ArrayUtil;
 
 /** This is a DocFieldConsumer that writes stored fields. */
@@ -46,7 +47,7 @@
       // It's possible that all documents seen in this segment
       // hit non-aborting exceptions, in which case we will
       // not have yet init'd the FieldsWriter:
-      initFieldsWriter();
+      initFieldsWriter(state.directory);
 
       // Fill fdx file to include any final docs that we
       // skipped because they hit non-aborting exceptions
@@ -57,12 +58,12 @@
       fieldsWriter.flush();
   }
   
-  private void initFieldsWriter() throws IOException {
+  private void initFieldsWriter(Directory directory) throws IOException {
     if (fieldsWriter == null) {
       final String docStoreSegment = docWriter.getDocStoreSegment();
       if (docStoreSegment != null) {
         assert docStoreSegment != null;
-        fieldsWriter = new FieldsWriter(docWriter.directory,
+        fieldsWriter = new FieldsWriter(directory,
                                         docStoreSegment,
                                         fieldInfos);
         docWriter.addOpenFile(docStoreSegment + "." + IndexFileNames.FIELDS_EXTENSION);
@@ -75,7 +76,7 @@
   synchronized public void closeDocStore(SegmentWriteState state) throws IOException {
     final int inc = state.numDocsInStore - lastDocID;
     if (inc > 0) {
-      initFieldsWriter();
+      initFieldsWriter(state.directory);
       fill(state.numDocsInStore - docWriter.getDocStoreOffset());
     }
 
@@ -138,7 +139,7 @@
 
   synchronized void finishDocument(PerDoc perDoc) throws IOException {
     assert docWriter.writer.testPoint("StoredFieldsWriter.finishDocument start");
-    initFieldsWriter();
+    initFieldsWriter(docWriter.writer.directory); // TODO: we need a way to pass the directory in here from segmentWriteState
 
     fill(perDoc.docID);
 
Index: src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsTermsWriter.java	(revision 776405)
+++ src/java/org/apache/lucene/index/TermVectorsTermsWriter.java	(working copy)
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.RAMOutputStream;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.ArrayUtil;
 
 import java.io.IOException;
@@ -36,9 +37,12 @@
   IndexOutput tvd;
   IndexOutput tvf;
   int lastDocID;
+  Directory dir;
 
   public TermVectorsTermsWriter(DocumentsWriter docWriter) {
     this.docWriter = docWriter;
+    // term vectors always go to the primary directory
+    this.dir = docWriter.writer.directory;
   }
 
   public TermsHashConsumerPerThread addThread(TermsHashPerThread termsHashPerThread) {
@@ -152,9 +156,9 @@
       // vector output files, we must abort this segment
       // because those files will be in an unknown
       // state:
-      tvx = docWriter.directory.createOutput(docStoreSegment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
-      tvd = docWriter.directory.createOutput(docStoreSegment +  "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
-      tvf = docWriter.directory.createOutput(docStoreSegment +  "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
+      tvx = dir.createOutput(docStoreSegment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
+      tvd = dir.createOutput(docStoreSegment +  "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
+      tvf = dir.createOutput(docStoreSegment +  "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
       
       tvx.writeInt(TermVectorsReader.FORMAT_CURRENT);
       tvd.writeInt(TermVectorsReader.FORMAT_CURRENT);
Index: src/java/org/apache/lucene/store/Directory.java
===================================================================
--- src/java/org/apache/lucene/store/Directory.java	(revision 776405)
+++ src/java/org/apache/lucene/store/Directory.java	(working copy)
@@ -67,7 +67,17 @@
   {
     return list();
   }
-
+  
+  /**
+   * Returns true if the given directory is equal to or 
+   * is contained within this directory.
+   * @param other Directory to compare against
+   * @return true if this directory equals or contains the given directory
+   */
+  public boolean contains(Directory other) {
+    return equals(other);
+  }
+  
   /** Returns true iff a file with the given name exists. */
   public abstract boolean fileExists(String name)
        throws IOException;
Index: src/java/org/apache/lucene/store/FileSwitchDirectory.java
===================================================================
--- src/java/org/apache/lucene/store/FileSwitchDirectory.java	(revision 776405)
+++ src/java/org/apache/lucene/store/FileSwitchDirectory.java	(working copy)
@@ -44,6 +44,17 @@
     this.lockFactory = primaryDir.getLockFactory();
   }
   
+  public boolean contains(Directory other) {
+    if (other instanceof FileSwitchDirectory) {
+      FileSwitchDirectory otherFsd = (FileSwitchDirectory)other;
+      if (otherFsd.contains(primaryDir) || otherFsd.contains(secondaryDir)) {
+        return true;
+      } 
+      return false;
+    }
+    return primaryDir.equals(other) || secondaryDir.equals(other);
+  }
+  
   public Directory getPrimaryDir() {
     return primaryDir;
   }
@@ -84,7 +95,9 @@
     return name.substring(i+1, name.length());
   }
 
-  private Directory getDirectory(String name) {
+  private Directory getDirectory(String name) throws IOException {
+    if (primaryDir.fileExists(name)) return primaryDir;
+    if (secondaryDir.fileExists(name)) return secondaryDir;
     String ext = getExtension(name);
     if (primaryExtensions.contains(ext)) {
       return primaryDir;
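
contains() is what lets the rest of the patch treat "the writer's directory" as a small set of directories: the base Directory implementation is plain equality, while FileSwitchDirectory also claims its primary and secondary members. A quick sketch of the expected answers, obtaining the FileSwitchDirectory from the writer the way the tests below do:

    // Sketch of contains() semantics; fsd wraps the primary (on-disk) dir
    // and the secondary (ram) dir.
    FileSwitchDirectory fsd = (FileSwitchDirectory) writer.getInternalDirectory();
    Directory primary = fsd.getPrimaryDir();
    Directory ram = fsd.getSecondaryDir();

    assert fsd.contains(primary);      // member of the switch dir
    assert fsd.contains(ram);          // member of the switch dir
    assert fsd.contains(fsd);          // a directory contains itself
    assert primary.contains(primary);  // base impl: equality
    assert !primary.contains(ram);     // base impl: different dirs
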
Index: src/java/org/apache/lucene/store/RAMDirectory.java
===================================================================
--- src/java/org/apache/lucene/store/RAMDirectory.java	(revision 776405)
+++ src/java/org/apache/lucene/store/RAMDirectory.java	(working copy)
@@ -37,7 +37,7 @@
   private static final long serialVersionUID = 1l;
 
   HashMap fileMap = new HashMap();
-  long sizeInBytes = 0;
+  volatile long sizeInBytes = 0;
   
   // *****
   // Lock acquisition sequence:  RAMDirectory, then RAMFile
Index: src/test/org/apache/lucene/index/TestCrash.java
===================================================================
--- src/test/org/apache/lucene/index/TestCrash.java	(revision 776405)
+++ src/test/org/apache/lucene/index/TestCrash.java	(working copy)
@@ -49,7 +49,7 @@
     return writer;
   }
 
-  private void crash(final IndexWriter writer) throws IOException {
+  public static void crash(final IndexWriter writer) throws IOException {
     final MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
     ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) writer.getMergeScheduler();
     dir.crash();
Index: src/test/org/apache/lucene/index/TestIndexWriterRamDir.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterRamDir.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestIndexWriterRamDir.java	(revision 0)
@@ -0,0 +1,613 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.FileSwitchDirectory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Tests IndexWriter's ram directory support: when a ram directory is
+ * set on the writer, flushes go to it first and the resulting segments
+ * are later merged to disk.
+ */
+public class TestIndexWriterRamDir extends LuceneTestCase {
+  
+  private static MockRAMDirectory createRamDir() throws IOException {
+    Set prevExts = new HashSet(); 
+    prevExts.add("cfs"); // prevent compound files
+    MockRAMDirectory ramDir = new MockRAMDirectory(prevExts);
+    return ramDir;
+  }
+  
+  /**
+   * Test if IW.optimize merges all segments into 1 and 
+   * moves them to disk.
+   * @throws Exception
+   */
+  public void testOptimize() throws Exception {
+    Directory dir = new MockRAMDirectory();
+    MockRAMDirectory ramDir = createRamDir();
+    // have ramdir assert compound files aren't created
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED, true, ramDir);
+    assertTrue(writer.ramNrt);
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    // create some segments
+    int rc = 0;
+    for (int x=0; x < 9000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "primary", 5);
+      writer.addDocument(d);
+      if (x % 500 == 0) {
+        writer.flush();
+        rc++;
+      }
+    }
+    writer.commit(); // commit them to RAM
+    for (int x=0; x < 4000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "postcommit", 5);
+      writer.addDocument(d);
+      if (x % 500 == 0) writer.flush();
+    }
+    writer.optimize();
+    cms.sync();
+    IndexReader reader = writer.getReader();
+    IndexReader[] readers = getAllReaders(reader);
+    assertEquals(1, readers.length); // ensure we have only 1 segment on disk
+    SegmentInfos infos = writer.getSegmentInfos();
+    assertEquals(1, infos.size());
+    SegmentInfo info = infos.info(0);
+    assertEquals(dir, info.dir); // verify the directory is the primary
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+  
+  /**
+   * Test to ensure expungeDeletes actually removes all
+   * segments with deleted docs when RAM NRT is on.
+   * @throws Exception
+   */
+  public void testExpungeDeletes() throws Exception {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED, true, createRamDir());
+    assertTrue(writer.ramNrt);
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    // create some segments
+    for (int x=0; x < 9000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "primary", 5);
+      writer.addDocument(d);
+      if (x % 500 == 0) writer.flush();
+    }
+    writer.commit(); // commit them to RAM
+    for (int x=0; x < 9000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "primary", 5);
+      writer.addDocument(d);
+      if (x % 500 == 0) writer.flush();
+    }
+    writer.expungeDeletes();
+    cms.sync();
+    IndexReader reader = writer.getReader();
+    IndexReader[] readers = getAllReaders(reader);
+    for (int x=0; x < readers.length; x++) {
+      assertEquals(0, readers[x].numDeletedDocs());
+    }    
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+  
+  static private final IndexReader[] indexReaderZeroArray = new IndexReader[0];
+  
+  private static void gatherSubReaders(List allSubReaders, IndexReader r) {
+    IndexReader[] subReaders = r.getSequentialSubReaders();
+    if (subReaders == null) {
+      // Add the reader itself, and do not recurse
+      allSubReaders.add(r);
+    } else {
+      for(int i=0;i<subReaders.length;i++) {
+        gatherSubReaders(allSubReaders, subReaders[i]);
+      }
+    }
+  }
+  
+  private static IndexReader[] getAllReaders(IndexReader reader) {
+    List subReadersList = new ArrayList();
+    gatherSubReaders(subReadersList, reader);
+    IndexReader[] sortedSubReaders = (IndexReader[]) subReadersList.toArray(indexReaderZeroArray);
+    return sortedSubReaders;
+  }
+  
+  public void testHasExternalSegments() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED, true);
+    assertTrue(writer.ramNrt);
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    FileSwitchDirectory fsd = (FileSwitchDirectory)writer.getInternalDirectory();
+    
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "prim", 5);
+      writer.addDocument(d);
+    }
+    writer.commit(); // commit to primary dir
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
+    writer.flush(); // flush to ram
+    
+    IndexReader reader = writer.getReader();
+    IndexReader ramReader = writer.getRamReader();
+    assertEquals(1000, ramReader.maxDoc());
+    IndexReader primaryReader = writer.getPrimaryReader();
+    assertEquals(1000, primaryReader.maxDoc());
+    
+    // we have ram and primary dir segments, make sure 
+    // hasExternalSegments works for FileSwitchDirectory
+    boolean hes = writer.getSegmentInfos().hasExternalSegments(writer.getInternalDirectory());
+    assertFalse(hes);
+    reader.close();
+    ramReader.close();
+    primaryReader.close();
+    writer.close();
+    dir.close();
+  }
+  
+  /**
+   * Test ram NRT with an FSDirectory
+   * @throws IOException
+   */
+  public void testFSDirectory() throws IOException {
+    String tempDir = System.getProperty("java.io.tmpdir");
+    if (tempDir == null)
+        throw new IOException("java.io.tmpdir undefined, cannot run test");
+    File indexDir = new File(tempDir, "lucenetestindexwriterramdir");
+    File[] files = indexDir.listFiles();
+    if (files != null) {
+      for (int x=0; x < files.length; x++) {
+        if (!files[x].isDirectory()) files[x].delete();
+      }
+    }
+    Directory dir = FSDirectory.getDirectory(indexDir);
+    IndexWriter.unlock(dir);
+    openWriterAddDocs(dir, false);
+    dir.close();
+    dir = FSDirectory.getDirectory(indexDir);
+    IndexWriter.unlock(dir);
+    openWriterAddDocs(dir, true);
+    dir.close();
+  }
+  
+  public static void openWriterAddDocs(Directory dir, boolean commitClose) throws IOException {
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED, true);
+    Set infoFiles = new HashSet();
+    SegmentInfos infos = writer.getSegmentInfos();
+    for (int x=0; x < infos.size(); x++) {
+      SegmentInfo info = infos.info(x);
+      List files = info.files();
+      for (int i=0; i < files.size(); i++) {
+        String f = (String)files.get(i);
+        infoFiles.add(f);
+      }
+    }
+    System.out.println("infos files:"+infoFiles);
+    System.out.println("dirfiles:"+printFiles(dir));
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
+    writer.flush();
+    if (commitClose) {
+      writer.commit();
+      writer.close();
+    }
+  }
+  
+  /**
+   * Make sure that after IndexWriter has written
+   * FileSwitchDirectory-based .fdt files to the primary
+   * directory, and the writer "crashes" before it is closed and
+   * before the ram segments are flushed to the primary dir, a new
+   * IndexWriter (via IndexFileDeleter) cleans up the now unused
+   * .fdt files.
+   * 
+   * @throws IOException
+   */
+  public void testIFDDeletingAfterCrash() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED, true);
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    FileSwitchDirectory fsd = (FileSwitchDirectory)writer.getInternalDirectory();
+    
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
+    writer.flush();
+    Set segmentNames = getSegmentNames(fsd.getSecondaryDir());
+    List fdtFileNames = new ArrayList();
+    Iterator segNameIterator = segmentNames.iterator();
+    while (segNameIterator.hasNext()) {
+      String name = (String)segNameIterator.next();
+      String fileName = name+".fdt";
+      if (dir.fileExists(fileName)) {
+        fdtFileNames.add(fileName);
+      }
+    }
+    
+    System.out.println("fdtFileNames:"+fdtFileNames);
+    
+    IndexWriter.unlock(dir);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    System.out.println("dirfiles:"+printFiles(dir));
+    // now verify the fdt files are no longer in the dir
+    for (int x=0; x < fdtFileNames.size(); x++) {
+      String name = (String)fdtFileNames.get(x);
+      assertFalse(dir.fileExists(name));
+    }
+    writer.close();
+    dir.close();
+  }
+  
+  /**
+   * Return the segment names derived from the unique .tii files
+   * found in the given directory.
+   * @throws IOException
+   */
+  public static Set getSegmentNames(Directory dir) throws IOException {
+    Set set = new HashSet();
+    String[] files = dir.listAll();
+    for (int x=0; x < files.length; x++) {
+      if (files[x].endsWith(".tii")) {
+        String str = files[x].substring(0, files[x].indexOf('.'));
+        set.add(str);
+      }
+    }
+    return set;
+  }
+  
+  public void testFSDFilesInPrimaryDir() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED, true);
+    //writer.setRAMBufferSizeMB(0.06); // 60 kilobytes should be exceeded quickly
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    assertTrue(writer.getDirectory() != writer.getInternalDirectory());
+    FileSwitchDirectory fsd = (FileSwitchDirectory)writer.getInternalDirectory();
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    
+    // add 1000 documents
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }    
+    // we should have some fdx,fdt,tvx files in the primary dir
+    writer.flush();
+    writer.commit();
+    cms.sync();
+    //Set segNames = getSegmentNames(writer.getFlushDirectory());
+    //System.out.println("segnames:"+segNames);
+    
+    //System.out.println("dir:"+printFiles(dir));
+    //System.out.println("ramDir:"+printFiles(ramDir));
+    // right files aren't there
+    
+    //writer.mergeRamSegmentsToDir(false);
+    
+    //writer.updatePendingMerges(1, false, true);
+    cms.sync(); // wait for the merges to complete
+    
+    //assertTrue(hasDirSegmentInfos(writer.getFlushDirectory(), writer.getSegmentInfos()));
+    
+    //System.out.println("infos.size:"+writer.getSegmentInfos().size());
+    
+    assertTrue(hasDirSegmentInfos(dir, writer.getSegmentInfos()));
+    IndexReader reader = writer.getReader();
+    assertEquals(1000, reader.maxDoc());
+    SegmentInfos infos = writer.getSegmentInfos();
+    //System.out.println("infos.size:"+infos.size()+" raminfos.size:"+writer.getRamDirSegmentInfos().size());
+    writer.close();
+    reader.close();
+    dir.close();
+  }
+  
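+  /** Render the file listing of the given Directory as a String (for debug output). */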
+  public static String printFiles(Directory dir) throws IOException {
+    String[] files = dir.listAll();
+    List filesList = new ArrayList();
+    for (int x=0; x < files.length; x++) {
+      filesList.add(files[x]);
+    }
+    return filesList.toString();
+  }
+  
+  /**
+   * Test IndexWriter when the RAM buffer size is quickly exceeded
+   * @throws IOException
+   */
+  public void testRamExceeded() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED, true, createRamDir());
+    writer.setRAMBufferSizeMB(0.06); // 60 kilobytes should be exceeded quickly
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    assertTrue(writer.getDirectory() != writer.getInternalDirectory());
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    
+    RAMMergePolicy ramMp = new RAMMergePolicy();
+    ramMp.setMergeFactor(2);
+    writer.setRAMMergePolicy(ramMp);
+    
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
+    //SegmentInfos flushInfos = writer.getSegmentInfos(writer.getFlushDirectory());
+    //System.out.println("flushInfos.size:"+flushInfos.size());
+    cms.sync();
+    System.out.println("ram dir avail:"+writer.docWriter.getRamDirAvailable()+" used:"+writer.docWriter.getRamDirSize());
+    System.out.println("ram buf avail:"+writer.docWriter.getRamBufferAvailable()+" used:"+writer.docWriter.numBytesUsed);
+    //assertTrue(hasDirSegmentInfos(writer.getFlushDirectory(), writer.getSegmentInfos()));
+    
+    //System.out.println("infos.size:"+writer.getSegmentInfos().size());
+    
+    //assertTrue(hasDirSegmentInfos(dir, writer.getSegmentInfos()));
+    IndexReader reader = writer.getReader();
+    assertEquals(1000, reader.maxDoc());
+    SegmentInfos infos = writer.getSegmentInfos();
+    //System.out.println("infos.size:"+infos.size()+" raminfos.size:"+writer.getRamDirSegmentInfos().size());
+    writer.close();
+    reader.close();
+    
+    dir.close();
+  }
+  
+  /**
+   * Test in-RAM merges when the ram dir size limit is exceeded
+   * @throws IOException
+   */
+  public void testMergeInRamExceeded() throws IOException {
+    MockRAMDirectory dir = new MockRAMDirectory();
+    MockRAMDirectory ramDir = createRamDir();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED, true, ramDir);
+    writer.setInfoStream(System.out);
+    writer.setRAMBufferSizeMB(0.2); // 100K for the ram dir and 100K for the ram buffer
+    System.out.println("ramDirSizeMax:"+writer.docWriter.getRamDirSizeMax());
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    
+    RAMMergePolicy ramMp = new RAMMergePolicy();
+    ramMp.setMergeFactor(2);
+    writer.setRAMMergePolicy(ramMp);
+    
+    // create 10 segments with 500 documents each
+    for (int x=0; x < 5000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+      if (x % 500 == 0) {
+        writer.flush();
+      }
+    }
+    SegmentInfos infos = writer.getSegmentInfos();
+    assertTrue(writer.getRamDirSegmentInfos().size() > 0);
+    IndexReader reader = writer.getReader();
+    assertEquals(5000, reader.maxDoc());
+    
+    //System.out.println("infos.size:"+infos.size()+" raminfos.size:"+writer.getRamDirSegmentInfos().size());
+    reader.close();
+    writer.close(); // note: the reader is closed before the writer; is the reverse order also safe?
+    
+    dir.close();
+  }
+  
+  /**
+   * Test IndexWriter performing merges within the ram dir
+   * @throws IOException
+   */
+  public void testMergeInRam() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED, true, createRamDir());
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    
+    RAMMergePolicy ramMp = new RAMMergePolicy();
+    ramMp.setMergeFactor(2);
+    writer.setRAMMergePolicy(ramMp);
+    
+    // create 10 segments with 500 documents each
+    for (int x=0; x < 5000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+      if (x % 500 == 0) {
+        writer.flush();
+      }
+    }
+    SegmentInfos infos = writer.getSegmentInfos();
+    assertTrue(writer.getRamDirSegmentInfos().size() > 0);
+    IndexReader reader = writer.getReader();
+    assertEquals(5000, reader.maxDoc());
+    //System.out.println("infos.size:"+infos.size()+" raminfos.size:"+writer.getRamDirSegmentInfos().size());
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+  
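+  /** A MergeScheduler that never runs merges, letting tests control merging explicitly. */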
+  public static class NoMergeScheduler extends MergeScheduler {
+    public void merge(IndexWriter writer) throws CorruptIndexException, IOException {}
+    public void close() {}
+  }
+  
+  /**
+   * Make sure IW.commit (exercised here directly via
+   * resolveRamSegments) flushes all ram segments to the primary
+   * directory
+   * 
+   * @throws IOException
+   */
+  public void testCommit() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED, true, createRamDir());
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    //writer.setMergeScheduler(new NoMergeScheduler());
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    
+    // create 10 segments with 500 documents each
+    for (int x=0; x < 5000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+      if (x % 500 == 0) {
+        writer.flush();
+      }
+    }
+    SegmentInfos infos = writer.getSegmentInfos();
+    assertTrue(infos.size() == 10);
+    assertTrue(hasDirSegmentInfos(writer.getInternalDirectory(), infos));
+    writer.resolveRamSegments(); // runs synchronously in the calling thread
+    SegmentInfos ramInfos = writer.getRamDirSegmentInfos();
+    assertEquals(0, ramInfos.size());
+    
+    IndexReader reader = writer.getReader();
+    assertEquals(5000, reader.maxDoc());
+    
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+  
+  /**
+   * Test IndexWriter merging ram segments to the primary directory
+   * @throws IOException
+   */
+  public void testMergeToPrimaryDir() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED, true, createRamDir());
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    
+    // create 10 segments with 500 documents each
+    for (int x=0; x < 5000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+      if (x % 500 == 0) {
+        writer.flush();
+      }
+    }
+    SegmentInfos infos = writer.getSegmentInfos();
+    //System.out.println("infos.size:"+infos.size()+" raminfos.size:"+writer.getRamDirSegmentInfos().size());
+    assertTrue(infos.size() == 10);
+    assertTrue(hasDirSegmentInfos(writer.getInternalDirectory(), infos));
+    writer.mergeRamSegmentsToDir();
+    cms.sync(); // wait for the merges to complete
+    //System.out.println("ram infos.size:"+writer.getRamDirSegmentInfos().size()+" "+IndexWriter.getSegmentNames(writer.getRamDirSegmentInfos()));
+    // after merging to the primary dir, few (ideally zero) ram segments should remain
+    SegmentInfos ramInfos = writer.getRamDirSegmentInfos();
+    assertTrue(ramInfos.size() < 3);
+    
+    IndexReader reader = writer.getReader();
+    assertEquals(5000, reader.maxDoc());
+    
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+  
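+  /**
+   * Returns true if at least one segment in the given
+   * SegmentInfos was written to the given Directory.
+   */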
+  private static boolean hasDirSegmentInfos(Directory dir, SegmentInfos infos) {
+    int dirSegs = 0;
+    for (int x=0; x < infos.size(); x++) {
+      SegmentInfo info = infos.info(x);
+      if (info.dir == dir) {
+        dirSegs++;
+      }
+    }
+    return dirSegs > 0;
+  }
+  
+  /**
+   * Test that the ram dir is used and that the reader returned
+   * from IndexWriter covers the in-ram segments.
+   * @throws IOException
+   */
+  public void testRamDir() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED, true, createRamDir());
+    for (int x=0; x < 100; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
+    writer.flush(false, false, true);
+    SegmentInfos infos = writer.getSegmentInfos();
+    assertTrue(writer.getDirectory() != writer.getInternalDirectory());
+    boolean hasRamDirSegs = hasDirSegmentInfos(writer.getInternalDirectory(), infos);
+    //System.out.println("ramDirSegs:"+ramDirSegs);
+    assertTrue(hasRamDirSegs);
+    IndexReader ramReader = writer.getReader();
+    assertEquals(100, ramReader.maxDoc());
+    writer.close();
+    ramReader.close();
+    dir.close();
+  }
+}
Index: src/test/org/apache/lucene/store/MockRAMDirectory.java
===================================================================
--- src/test/org/apache/lucene/store/MockRAMDirectory.java	(revision 776405)
+++ src/test/org/apache/lucene/store/MockRAMDirectory.java	(working copy)
@@ -47,6 +47,7 @@
   private Set unSyncedFiles;
   private Set createdFiles;
   volatile boolean crashed;
+  private Set extPrevent;
 
   // NOTE: we cannot initialize the Map here due to the
   // order in which our constructor actually does this
@@ -79,7 +80,19 @@
     super(dir);
     init();
   }
-
+  
+  public MockRAMDirectory(Set extPrevent) throws IOException {
+    this.extPrevent = extPrevent;
+    init();
+  }
+  
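+  /**
+   * Returns true if a file with the given name may be created.
+   * When an extension-prevent set is configured, files whose
+   * extension is in the set, as well as files with no extension,
+   * are rejected.
+   */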
+  private boolean extensionOK(String name) {
+    if (extPrevent == null) return true;
+    String ext = FileSwitchDirectory.getExtension(name);
+    if (ext == null || ext.equals("")) return false;
+    return !extPrevent.contains(ext);
+  }
+  
   /** If set to true, we throw an IOException if the same
    *  file is opened by createOutput, ever. */
   public void setPreventDoubleWrite(boolean value) {
@@ -204,6 +217,9 @@
     if (crashed)
       throw new IOException("cannot createOutput after crash");
     init();
+    if (!extensionOK(name)) {
+      throw new IOException("extension for "+name+" not allowed");
+    }
     if (preventDoubleWrite && createdFiles.contains(name) && !name.equals("segments.gen"))
       throw new IOException("file \"" + name + "\" was already written to");
     if (noDeleteOpenFile && openFiles.containsKey(name))
Index: src/test/org/apache/lucene/store/MockRAMInputStream.java
===================================================================
--- src/test/org/apache/lucene/store/MockRAMInputStream.java	(revision 776405)
+++ src/test/org/apache/lucene/store/MockRAMInputStream.java	(working copy)
@@ -1,6 +1,7 @@
 package org.apache.lucene.store;
 
 import java.io.IOException;
+import java.util.Set;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -28,6 +29,7 @@
   private MockRAMDirectory dir;
   private String name;
   private boolean isClone;
+  StackTraceElement[] stackTrace; // caller of the stream
 
   /** Construct an empty output buffer. 
    * @throws IOException */
@@ -35,6 +37,7 @@
     super(f);
     this.name = name;
     this.dir = dir;
+    stackTrace = Thread.currentThread().getStackTrace();
   }
 
   public void close() {
Index: src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
===================================================================
--- src/test/org/apache/lucene/store/TestFileSwitchDirectory.java	(revision 776405)
+++ src/test/org/apache/lucene/store/TestFileSwitchDirectory.java	(working copy)
@@ -45,6 +45,7 @@
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setUseCompoundFile(false);
     TestIndexWriterReader.createIndexNoClose(true, "ram", writer);
+    
     IndexReader reader = writer.getReader();
     assertEquals(100, reader.maxDoc());
     writer.commit();
Index: src/test/org/apache/lucene/store/TestMockRAMDirectory.java
===================================================================
--- src/test/org/apache/lucene/store/TestMockRAMDirectory.java	(revision 0)
+++ src/test/org/apache/lucene/store/TestMockRAMDirectory.java	(revision 0)
@@ -0,0 +1,23 @@
+package org.apache.lucene.store;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestMockRAMDirectory extends LuceneTestCase {
+  public void testMockRAMDirectory() throws IOException {
+    Set set = new HashSet();
+    set.add("cfs");
+    MockRAMDirectory ramDir = new MockRAMDirectory(set);
+    boolean ex = false;
+    try {
+      // "cfs" is in the prevent set, so this createOutput should be rejected
+      ramDir.createOutput("test1.cfs");
+    } catch (IOException e) {
+      ex = true;
+    }
+    ramDir.close();
+    assertTrue("createOutput should have thrown for a prevented extension", ex);
+  }
+}
