Index: src/test/org/apache/lucene/index/TestIndexWriterRamDir.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterRamDir.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestIndexWriterRamDir.java	(revision 0)
@@ -0,0 +1,213 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Tests IndexWriter's RAM directory support: when a RAM directory is
+ * set on the writer, new segments are flushed to it first and are
+ * later merged down to disk.
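+ *
+ * <p>Typical usage, as a sketch (the two-directory constructor is the
+ * variant added by this patch; the path and analyzer are illustrative):
+ *
+ * <pre>
+ * Directory dir = FSDirectory.getDirectory("/path/to/index");
+ * RAMDirectory ramDir = new RAMDirectory();
+ * IndexWriter writer = new IndexWriter(dir, ramDir,
+ *     new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+ * // segments flush to ramDir first, then merge down to dir
+ * </pre>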
+ */
+// TODO: test cases for addIndexesNoOptimize still working properly
+public class TestIndexWriterRamDir extends LuceneTestCase {
+  
+  /**
+   * Tests that a small RAM buffer forces RAM segments to be merged down to disk.
+   * @throws IOException
+   */
+  public void testMergeRamExceeded() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    RAMDirectory ramDir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, ramDir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.setRAMBufferSizeMB(0.03); // ~30 KB budget, exceeded almost immediately
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    LogDocMergePolicy ramMP = new LogDocMergePolicy();
+    ramMP.setMergeFactor(2);
+    writer.setRAMMergePolicy(ramMP);
+    
+    // add 1000 documents; the tiny RAM buffer forces frequent flushes
+    for (int x=0; x < 1000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
+    IndexReader reader = writer.getReader();
+    assertEquals(1000, reader.maxDoc());
+    SegmentInfos infos = writer.getSegmentInfos();
+    System.out.println("infos.size:"+infos.size()+" raminfos.size:"+writer.getRamDirSegmentInfos().size());
+    writer.close();
+    reader.close();
+    dir.close();
+    ramDir.close();
+  }
+  
+  /**
+   * Tests that segments flushed to the RAM directory are merged in RAM.
+   * @throws IOException
+   */
+  public void testMergeInRam() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    RAMDirectory ramDir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, ramDir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    LogDocMergePolicy ramMP = new LogDocMergePolicy();
+    ramMP.setMergeFactor(2);
+    writer.setRAMMergePolicy(ramMP);
+    
+    // add 5000 documents, flushing about every 500 to create roughly 10 segments
+    for (int x=0; x < 5000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+      if (x % 500 == 0) {
+        writer.flush();
+      }
+    }
+    SegmentInfos infos = writer.getSegmentInfos();
+    System.out.println("infos.size:"+infos.size()+" raminfos.size:"+writer.getRamDirSegmentInfos().size());
+    writer.close();
+    dir.close();
+    ramDir.close();
+  }
+  
+  /**
+   * Tests that RAM segments are merged down to the main directory when forced.
+   * @throws IOException
+   */
+  public void testMergeToDisk() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    RAMDirectory ramDir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, ramDir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    writer.setMergeScheduler(cms);
+    
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    writer.setMergePolicy(mp);
+    LogDocMergePolicy ramMP = new LogDocMergePolicy();
+    //ramMP.setMergeFactor(2);
+    writer.setRAMMergePolicy(ramMP);
+    
+    // add 5000 documents, flushing about every 500 to create roughly 10 segments
+    for (int x=0; x < 5000; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+      if (x % 500 == 0) {
+        writer.flush();
+      }
+    }
+    SegmentInfos infos = writer.getSegmentInfos();
+    System.out.println("infos.size:"+infos.size()+" raminfos.size:"+writer.getRamDirSegmentInfos().size());
+    assertEquals(10, infos.size());
+    assertTrue(hasDirSegmentInfos(ramDir, infos));
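+    // forceToDir=true: optimize all RAM segments down to the main directory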
+    writer.updatePendingMerges(1, false, true);
+    cms.sync(); // wait for the merges to complete
+    System.out.println("ram infos.size:"+writer.getRamDirSegmentInfos().size()+" "+getSegmentNames(writer.getRamDirSegmentInfos()));
+    // after forcing merges to disk only a couple of ram segments,
+    // ideally none, should remain
+    SegmentInfos ramInfos = IndexWriter.getInfosByDir(infos, ramDir);
+    assertTrue(ramInfos.size() < 3);
+    
+    IndexReader reader = writer.getReader();
+    assertEquals(5000, reader.maxDoc());
+    
+    reader.close();
+    writer.close();
+    dir.close();
+    ramDir.close();
+  }
+  
+  public static List getSegmentNames(SegmentInfos infos) {
+    List list = new ArrayList();
+    for (int x=0; x < infos.size(); x++) {
+      list.add(infos.info(x).name);
+    }
+    return list;
+  }
+  
+  private static boolean hasDirSegmentInfos(Directory dir, SegmentInfos infos) {
+    int dirSegs = 0;
+    for (int x=0; x < infos.size(); x++) {
+      SegmentInfo info = infos.info(x);
+      if (info.dir == dir) {
+        dirSegs++;
+      }
+    }
+    return dirSegs > 0;
+  }
+  
+  /**
+   * Tests that the RAM directory receives flushed segments and that
+   * the reader returned from IndexWriter covers the in-RAM
+   * segments.
+   * @throws IOException
+   */
+  public void testRamDir() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    RAMDirectory ramDir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, ramDir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);    
+    for (int x=0; x < 100; x++) {
+      Document d = TestIndexWriterReader.createDocument(x, "ram", 5);
+      writer.addDocument(d);
+    }
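+    // flush buffered docs without triggering merges; the new segment should land in ramDir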
+    writer.flush(false, false, true);
+    SegmentInfos infos = writer.getSegmentInfos();
+    boolean hasRamDirSegs = hasDirSegmentInfos(ramDir, infos);
+    assertTrue(hasRamDirSegs);
+    IndexReader ramReader = writer.getReader();
+    assertEquals(100, ramReader.maxDoc());
+    writer.close();
+    ramReader.close();
+    dir.close();
+    ramDir.close();
+  }
+}

Property changes on: src/test/org/apache/lucene/index/TestIndexWriterRamDir.java
___________________________________________________________________
Name: svn:mime-type
   + text/plain
Name: svn:keywords
   + "Date Rev Author URL Id"
Name: svn:eol-style
   + native

Index: src/test/org/apache/lucene/store/MockRAMDirectory.java
===================================================================
--- src/test/org/apache/lucene/store/MockRAMDirectory.java	(revision 770396)
+++ src/test/org/apache/lucene/store/MockRAMDirectory.java	(working copy)
@@ -242,9 +242,9 @@
     synchronized (this) {
       file = (RAMFile)fileMap.get(name);
     }
-    if (file == null)
-      throw new FileNotFoundException(name);
-    else {
+    if (file == null) {
+      throw new FileNotFoundException(name + " files: " + java.util.Arrays.asList(listAll()));
+    } else {
       synchronized(openFiles) {
         if (openFiles.containsKey(name)) {
           Integer v = (Integer) openFiles.get(name);
Index: src/java/org/apache/lucene/index/DirectoryIndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/DirectoryIndexReader.java	(revision 770396)
+++ src/java/org/apache/lucene/index/DirectoryIndexReader.java	(working copy)
@@ -400,7 +400,7 @@
         segmentInfos.setUserData(commitUserData);
         // Default deleter (for backwards compatibility) is
         // KeepOnlyLastCommitDeleter:
-        IndexFileDeleter deleter =  new IndexFileDeleter(directory,
+        IndexFileDeleter deleter =  new IndexFileDeleter(directory, null,
                                                          deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy,
                                                          segmentInfos, null, null);
 
Index: src/java/org/apache/lucene/index/SegmentInfo.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfo.java	(revision 770396)
+++ src/java/org/apache/lucene/index/SegmentInfo.java	(working copy)
@@ -549,7 +549,7 @@
    * modify it.
    */
 
-  public List files() throws IOException {
+  public synchronized List files() throws IOException {
 
     if (files != null) {
       // Already cached:
@@ -644,7 +644,7 @@
 
   /* Called whenever any change is made that affects which
    * files this segment has. */
-  private void clearFiles() {
+  private synchronized void clearFiles() {
     files = null;
     sizeInBytes = -1;
   }
Index: src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java	(revision 770396)
+++ src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java	(working copy)
@@ -23,6 +23,7 @@
 import java.io.IOException;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.store.Directory;
 
 /**
  * Gathers all Fieldables for a document under the same
@@ -146,7 +147,7 @@
   }
 
   public DocumentsWriter.DocWriter processDocument() throws IOException {
-
+    Directory dir = this.docState.docWriter.writer.getFlushDirectory();
     consumer.startDocument();
     final Document doc = docState.doc;
 
Index: src/java/org/apache/lucene/index/MergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/MergePolicy.java	(revision 770396)
+++ src/java/org/apache/lucene/index/MergePolicy.java	(working copy)
@@ -85,6 +85,7 @@
     final boolean useCompoundFile;
     boolean aborted;
     Throwable error;
+    Directory directory;               // destination directory for the merged segment
 
     public OneMerge(SegmentInfos segments, boolean useCompoundFile) {
       if (0 == segments.size())
Index: src/java/org/apache/lucene/index/LogMergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/LogMergePolicy.java	(revision 770396)
+++ src/java/org/apache/lucene/index/LogMergePolicy.java	(working copy)
@@ -19,7 +19,8 @@
 
 import java.io.IOException;
 import java.util.Set;
+import java.util.List;
 
 import org.apache.lucene.store.Directory;
 
 /** <p>This class implements a {@link MergePolicy} that tries
@@ -301,7 +303,31 @@
 
     return spec;
   }
 
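+  /** Returns the total size in bytes of all segments selected by spec. */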
+  long size(MergeSpecification spec) throws IOException {
+    long size = 0;
+    for (int x=0; x < spec.merges.size(); x++) {
+      MergePolicy.OneMerge merge = (MergePolicy.OneMerge)spec.merges.get(x);
+      size += IndexWriter.size(merge.segments);
+    }
+    return size;
+  }
+  
+  long getMaxRamDirSize() {
+    return (long) (writer.getRAMBufferSizeMB() * 1024 * 1024);
+  }
+  
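+  /** Flattens several OneMerges into a single OneMerge covering all their segments. */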
+  MergePolicy.OneMerge combineMerges(List merges) {
+    SegmentInfos infos = new SegmentInfos();
+    for (int x=0; x < merges.size(); x++) {
+      MergePolicy.OneMerge m = (MergePolicy.OneMerge)merges.get(x);
+      infos.addAll(m.segments);
+    }
+    return new MergePolicy.OneMerge(infos, useCompoundFile);
+  }
+  
   /** Checks if any merges are now necessary and returns a
    *  {@link MergePolicy.MergeSpecification} if so.  A merge
    *  is necessary when there are more than {@link
@@ -309,8 +333,7 @@
    *  multiple levels have too many segments, this method
    *  will return multiple merges, allowing the {@link
    *  MergeScheduler} to use concurrency. */
   public MergeSpecification findMerges(SegmentInfos infos, IndexWriter writer) throws IOException {
-
     final int numSegments = infos.size();
     this.writer = writer;
     if (verbose())
@@ -321,8 +344,6 @@
     float[] levels = new float[numSegments];
     final float norm = (float) Math.log(mergeFactor);
 
-    final Directory directory = writer.getDirectory();
-
     for(int i=0;i<numSegments;i++) {
       final SegmentInfo info = infos.info(i);
       long size = size(info);
Index: src/java/org/apache/lucene/index/StoredFieldsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/StoredFieldsWriter.java	(revision 770396)
+++ src/java/org/apache/lucene/index/StoredFieldsWriter.java	(working copy)
@@ -20,6 +20,7 @@
 import java.util.Map;
 import java.io.IOException;
 import org.apache.lucene.store.RAMOutputStream;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.ArrayUtil;
 
 /** This is a DocFieldConsumer that writes stored fields. */
@@ -62,7 +63,8 @@
       final String docStoreSegment = docWriter.getDocStoreSegment();
       if (docStoreSegment != null) {
         assert docStoreSegment != null;
-        fieldsWriter = new FieldsWriter(docWriter.directory,
+        Directory directory = docWriter.writer.getFlushDirectory();
+        fieldsWriter = new FieldsWriter(directory,
                                         docStoreSegment,
                                         fieldInfos);
         docWriter.addOpenFile(docStoreSegment + "." + IndexFileNames.FIELDS_EXTENSION);
Index: src/java/org/apache/lucene/index/SegmentInfos.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfos.java	(revision 770396)
+++ src/java/org/apache/lucene/index/SegmentInfos.java	(working copy)
@@ -918,10 +918,10 @@
   }
 
   // Used only for testing
-  boolean hasExternalSegments(Directory dir) {
+  boolean hasExternalSegments(Directory dir, Directory ramDir) {
     final int numSegments = size();
     for(int i=0;i<numSegments;i++)
-      if (info(i).dir != dir)
+      if (info(i).dir != dir && info(i).dir != ramDir)
         return true;
     return false;
   }
Index: src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentMerger.java	(revision 770396)
+++ src/java/org/apache/lucene/index/SegmentMerger.java	(working copy)
@@ -78,7 +78,7 @@
   }
 
   SegmentMerger(IndexWriter writer, String name, MergePolicy.OneMerge merge) {
-    directory = writer.getDirectory();
+    directory = (merge != null && merge.directory != null) ? merge.directory : writer.getDirectory();
     segment = name;
     if (merge != null)
       checkAbort = new CheckAbort(merge, directory);
Index: src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsTermsWriter.java	(revision 770396)
+++ src/java/org/apache/lucene/index/TermVectorsTermsWriter.java	(working copy)
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.RAMOutputStream;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.ArrayUtil;
 
 import java.io.IOException;
@@ -152,9 +153,10 @@
       // vector output files, we must abort this segment
       // because those files will be in an unknown
       // state:
-      tvx = docWriter.directory.createOutput(docStoreSegment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
-      tvd = docWriter.directory.createOutput(docStoreSegment +  "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
-      tvf = docWriter.directory.createOutput(docStoreSegment +  "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
+      Directory dir = docWriter.writer.getFlushDirectory();
+      tvx = dir.createOutput(docStoreSegment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
+      tvd = dir.createOutput(docStoreSegment +  "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
+      tvf = dir.createOutput(docStoreSegment +  "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
       
       tvx.writeInt(TermVectorsReader.FORMAT_CURRENT);
       tvd.writeInt(TermVectorsReader.FORMAT_CURRENT);
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 770396)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -25,6 +25,7 @@
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.util.ArrayUtil;
 
@@ -110,7 +111,6 @@
 final class DocumentsWriter {
 
   IndexWriter writer;
-  Directory directory;
 
   String segment;                         // Current segment we are working on
   private String docStoreSegment;         // Current doc-store segment we are writing
@@ -259,8 +259,7 @@
 
   private boolean closed;
 
-  DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain) throws IOException {
-    this.directory = directory;
+  DocumentsWriter(IndexWriter writer, IndexingChain indexingChain) throws IOException {
     this.writer = writer;
     this.similarity = writer.getSimilarity();
     flushedDocCount = writer.maxDoc();
@@ -536,6 +535,7 @@
 
   synchronized private void initFlushState(boolean onlyDocStore) {
     initSegmentName(onlyDocStore);
+    Directory directory = writer.getFlushDirectory();
     flushState = new SegmentWriteState(this, directory, segment, docStoreSegment, numDocsInRAM, numDocsInStore, writer.getTermIndexInterval());
   }
 
@@ -549,7 +549,8 @@
     assert nextDocID == numDocsInRAM;
     assert waitQueue.numWaiting == 0;
     assert waitQueue.waitingBytes == 0;
 
+    Directory directory = writer.getFlushDirectory();
     initFlushState(false);
 
     docStoreOffset = numDocsInStore;
@@ -574,7 +575,7 @@
       consumer.flush(threads, flushState);
 
       if (infoStream != null) {
-        final long newSegmentSize = segmentSize(flushState.segmentName);
+        final long newSegmentSize = segmentSize(flushState.segmentName, directory);
         String message = "  oldRAMSize=" + numBytesUsed +
           " newFlushedSize=" + newSegmentSize +
           " docs/MB=" + nf.format(numDocsInRAM/(newSegmentSize/1024./1024.)) +
@@ -600,7 +601,7 @@
   }
 
   /** Build compound file for the segment we just flushed */
-  void createCompoundFile(String segment) throws IOException {
+  void createCompoundFile(String segment, Directory directory) throws IOException {
     
     CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
     Iterator it = flushState.flushedFiles.iterator();
@@ -763,7 +764,6 @@
       // This call is not synchronized and does all the
       // work
       final DocWriter perDoc = state.consumer.processDocument();
-        
       // This call is synchronized but fast
       finishDocument(state, perDoc);
       success = true;
@@ -914,11 +914,13 @@
 
     int docStart = 0;
     boolean any = false;
+    Directory flushDir = writer.getFlushDirectory();
+    Directory directory = writer.getDirectory();
     for (int i = 0; i < infosEnd; i++) {
 
       // Make sure we never attempt to apply deletes to
       // segment in external dir
-      assert infos.info(i).dir == directory;
+      assert infos.info(i).dir == directory || infos.info(i).dir == flushDir;
 
       SegmentReader reader = writer.readerPool.get(infos.info(i), false);
       try {
@@ -1103,7 +1105,18 @@
   long getRAMUsed() {
     return numBytesUsed;
   }
 
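+  /** Returns true when buffered documents plus segments already flushed
+   *  to the RAM directory exceed the configured RAM buffer size. */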
+  boolean ramOverLimit() {
+    RAMDirectory ramDir = (RAMDirectory)writer.getRAMDirectory();
+    if (ramDir != null) {
+      // segments already flushed to the RAM dir count against the budget
+      return (getRAMUsed() + ramDir.sizeInBytes()) > this.ramBufferSize;
+    }
+    return getRAMUsed() > this.ramBufferSize;
+  }
+  
   long numBytesAlloc;
   long numBytesUsed;
 
@@ -1111,10 +1123,9 @@
 
   // TODO FI: this is not flexible -- we can't hardwire
   // extensions in here:
-  private long segmentSize(String segmentName) throws IOException {
+  private long segmentSize(String segmentName, Directory directory) throws IOException {
     // Used only when infoStream != null
     assert infoStream != null;
-    
     long size = directory.fileLength(segmentName + ".tii") +
       directory.fileLength(segmentName + ".tis") +
       directory.fileLength(segmentName + ".frq") +
@@ -1123,10 +1134,23 @@
     final String normFileName = segmentName + ".nrm";
     if (directory.fileExists(normFileName))
       size += directory.fileLength(normFileName);
 
     return size;
   }
 
+  public long fileLength(String name) throws IOException {
+    Directory dir = writer.getDirectory();
+    Directory dir2 = writer.getRAMDirectory();
+    return fileLength(name, dir, dir2);
+  }
+  
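+  /** Looks up the file's length in dir1 first, falling back to dir2 (the RAM directory). */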
+  public static long fileLength(String name, Directory dir1, Directory dir2) throws IOException {
+    if (dir2 == null || dir1.fileExists(name)) {
+      return dir1.fileLength(name);
+    } else return dir2.fileLength(name);
+  }
+  
   // Coarse estimates used to measure RAM usage of buffered deletes
   final static int OBJECT_HEADER_BYTES = 8;
   final static int POINTER_NUM_BYTE = 4;
Index: src/java/org/apache/lucene/index/MultiSegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiSegmentReader.java	(revision 770396)
+++ src/java/org/apache/lucene/index/MultiSegmentReader.java	(working copy)
@@ -84,13 +84,16 @@
     final int numSegments = infos.size();
     SegmentReader[] readers = new SegmentReader[numSegments];
     final Directory dir = writer.getDirectory();
+    final Directory flushDir = writer.getFlushDirectory();
     int upto = 0;
 
     for (int i=0;i<numSegments;i++) {
       boolean success = false;
       try {
         final SegmentInfo info = infos.info(upto);
-        if (info.dir == dir) {
+        // accept the regular and ram dirs (not the external dir 
+        // from addIndexesNoOptimize)
+        if (info.dir == dir || info.dir == flushDir) {
           readers[upto++] = writer.readerPool.getReadOnlyClone(info, true);
         }
         success = true;
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 770396)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -28,6 +28,7 @@
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.BufferedIndexInput;
+import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.Constants;
 
 import java.io.File;
@@ -353,6 +354,7 @@
   private HashSet mergingSegments = new HashSet();
 
   private MergePolicy mergePolicy = new LogByteSizeMergePolicy();
+  private MergePolicy ramMergePolicy = mergePolicy;  // merges among RAM-dir segments; defaults to the main policy
   private MergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
   private LinkedList pendingMerges = new LinkedList();
   private Set runningMerges = new HashSet();
@@ -370,6 +372,7 @@
   private Thread writeThread;                     // non-null if any thread holds write lock
   final ReaderPool readerPool = new ReaderPool();
   private int upgradeCount;
+  private Directory ramDirectory;
   
   // This is a "write once" variable (like the organic dye
   // on a DVD-R that may or may not be heated by a laser and
@@ -847,7 +850,7 @@
    */
   public IndexWriter(String path, Analyzer a, boolean create, MaxFieldLength mfl)
        throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(FSDirectory.getDirectory(path), a, create, true, null, false, mfl.getLimit(), null, null);
+    init(FSDirectory.getDirectory(path), null, a, create, true, null, false, mfl.getLimit(), null, null);
   }
 
   /**
@@ -876,7 +879,7 @@
    */
   public IndexWriter(String path, Analyzer a, boolean create)
        throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(FSDirectory.getDirectory(path), a, create, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
+    init(FSDirectory.getDirectory(path), null, a, create, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
   }
 
   /**
@@ -907,7 +910,7 @@
    */
   public IndexWriter(File path, Analyzer a, boolean create, MaxFieldLength mfl)
        throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(FSDirectory.getDirectory(path), a, create, true, null, false, mfl.getLimit(), null, null);
+    init(FSDirectory.getDirectory(path), null, a, create, true, null, false, mfl.getLimit(), null, null);
   }
 
   /**
@@ -936,7 +939,7 @@
    */
   public IndexWriter(File path, Analyzer a, boolean create)
        throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(FSDirectory.getDirectory(path), a, create, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
+    init(FSDirectory.getDirectory(path), null, a, create, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
   }
 
   /**
@@ -967,7 +970,7 @@
    */
   public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl)
        throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, create, false, null, false, mfl.getLimit(), null, null);
+    init(d, null, a, create, false, null, false, mfl.getLimit(), null, null);
   }
 
   /**
@@ -995,7 +998,7 @@
    */
   public IndexWriter(Directory d, Analyzer a, boolean create)
        throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, create, false, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
+    init(d, null, a, create, false, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
   }
 
   /**
@@ -1126,6 +1129,41 @@
     throws CorruptIndexException, LockObtainFailedException, IOException {
     init(d, a, false, null, false, mfl.getLimit(), null, null);
   }
+  
+  /**
+   * Constructs an IndexWriter for the index in
+   * <code>d</code>, first creating it if it does not
+   * already exist.  Text will be analyzed with
+   * <code>a</code>.
+   *
+   * <p><b>NOTE</b>: autoCommit (see <a
+   * href="#autoCommit">above</a>) is set to false with this
+   * constructor.
+   *
+   * @param d the index directory
+   * @param ramDir optional RAM directory that receives new flushes before they are merged down to <code>d</code>
+   * @param a the analyzer to use
+   * @param mfl Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
+   *   via the MaxFieldLength constructor.
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws LockObtainFailedException if another writer
+   *  has this index open (<code>write.lock</code> could not
+   *  be obtained)
+   * @throws IOException if the directory cannot be
+   *  read/written to or if there is any other low-level
+   *  IO error
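+   *
+   * <p>A sketch of intended use (the path and analyzer are illustrative):
+   *
+   * <pre>
+   * IndexWriter w = new IndexWriter(FSDirectory.getDirectory("/idx"),
+   *     new RAMDirectory(), new WhitespaceAnalyzer(), MaxFieldLength.LIMITED);
+   * </pre>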
+   */
+  public IndexWriter(Directory d, Directory ramDir, Analyzer a, MaxFieldLength mfl)
+    throws CorruptIndexException, LockObtainFailedException, IOException {
+    init(d, ramDir, a, false, null, false, mfl.getLimit(), null, null);
+  }
 
   /**
    * Constructs an IndexWriter for the index in
@@ -1205,7 +1236,7 @@
    */
   public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create)
        throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, create, false, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
+    init(d, null, a, create, false, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
   }
 
   /**
@@ -1292,7 +1323,7 @@
    */
   public IndexWriter(Directory d, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
        throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, create, false, deletionPolicy, false, mfl.getLimit(), null, null);
+    init(d, null, a, create, false, deletionPolicy, false, mfl.getLimit(), null, null);
   }
   
   /**
@@ -1329,7 +1360,7 @@
    */
   IndexWriter(Directory d, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexingChain indexingChain, IndexCommit commit)
        throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, create, false, deletionPolicy, false, mfl.getLimit(), indexingChain, commit);
+    init(d, null, a, create, false, deletionPolicy, false, mfl.getLimit(), indexingChain, commit);
   }
   
   /**
@@ -1362,7 +1393,7 @@
    */
   public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy)
           throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, create, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
+    init(d, null, a, create, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
   }
 
   /**
@@ -1401,27 +1432,34 @@
    *  <code>false</code> or if there is any other low-level
    *  IO error
    */
-  public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexCommit commit)
+  public IndexWriter(Directory d, Directory ramDirectory, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexCommit commit)
        throws CorruptIndexException, LockObtainFailedException, IOException {
-    init(d, a, false, false, deletionPolicy, false, mfl.getLimit(), null, commit);
+    init(d, ramDirectory, a, false, false, deletionPolicy, false, mfl.getLimit(), null, commit);
   }
 
   private void init(Directory d, Analyzer a, boolean closeDir, IndexDeletionPolicy deletionPolicy, 
+      boolean autoCommit, int maxFieldLength, IndexingChain indexingChain, IndexCommit commit)
+     throws CorruptIndexException, LockObtainFailedException, IOException {
+    init(d, null, a, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit);
+  }
+  
+  private void init(Directory d, Directory ramDirectory, Analyzer a, boolean closeDir, IndexDeletionPolicy deletionPolicy, 
                     boolean autoCommit, int maxFieldLength, IndexingChain indexingChain, IndexCommit commit)
     throws CorruptIndexException, LockObtainFailedException, IOException {
     if (IndexReader.indexExists(d)) {
-      init(d, a, false, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit);
+      init(d, ramDirectory, a, false, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit);
     } else {
-      init(d, a, true, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit);
+      init(d, ramDirectory, a, true, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit);
     }
   }
 
-  private void init(Directory d, Analyzer a, final boolean create, boolean closeDir, 
+  private void init(Directory d, Directory ramDirectory, Analyzer a, final boolean create, boolean closeDir, 
                     IndexDeletionPolicy deletionPolicy, boolean autoCommit, int maxFieldLength,
                     IndexingChain indexingChain, IndexCommit commit)
     throws CorruptIndexException, LockObtainFailedException, IOException {
     this.closeDir = closeDir;
     directory = d;
+    this.ramDirectory = ramDirectory;
     analyzer = a;
     setMessageID(defaultInfoStream);
     this.maxFieldLength = maxFieldLength;
@@ -1479,13 +1517,13 @@
       this.autoCommit = autoCommit;
       setRollbackSegmentInfos(segmentInfos);
 
-      docWriter = new DocumentsWriter(directory, this, indexingChain);
+      docWriter = new DocumentsWriter(this, indexingChain);
       docWriter.setInfoStream(infoStream);
       docWriter.setMaxFieldLength(maxFieldLength);
 
       // Default deleter (for backwards compatibility) is
       // KeepOnlyLastCommitDeleter:
-      deleter = new IndexFileDeleter(directory,
+      deleter = new IndexFileDeleter(directory, ramDirectory, 
                                      deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy,
                                      segmentInfos, infoStream, docWriter);
 
@@ -1509,16 +1547,45 @@
       throw e;
     }
   }
 
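+  /** Returns the directory that new flushes are written to: the RAM
+   *  directory if one is set, otherwise the main directory. */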
+  public Directory getFlushDirectory() {
+    if (ramDirectory != null) {
+      return ramDirectory;
+    } else return directory;
+  }
+  
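+  /** Returns the RAM directory set at construction time, or null if none. */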
+  public Directory getRAMDirectory() {
+    return ramDirectory;
+  }
+  
   private synchronized void setRollbackSegmentInfos(SegmentInfos infos) {
     rollbackSegmentInfos = (SegmentInfos) infos.clone();
-    assert !rollbackSegmentInfos.hasExternalSegments(directory);
+    assert !rollbackSegmentInfos.hasExternalSegments(directory, getFlushDirectory());
     rollbackSegments = new HashMap();
     final int size = rollbackSegmentInfos.size();
     for(int i=0;i<size;i++)
       rollbackSegments.put(rollbackSegmentInfos.info(i), new Integer(i));
   }
+  
+  /**
+   * Expert: set the ram directory merge policy used by this writer.
+   */
+  public void setRAMMergePolicy(MergePolicy mp) {
+    ensureOpen();
+    if (mp == null)
+      throw new NullPointerException("MergePolicy must be non-null");
 
+    if (ramMergePolicy != mp && ramMergePolicy != mergePolicy)
+      ramMergePolicy.close();
+    ramMergePolicy = mp;
+    pushMaxBufferedDocs();
+    if (infoStream != null)
+      message("setMergePolicy " + mp);
+  }
+  
   /**
    * Expert: set the merge policy used by this writer.
    */
@@ -2132,7 +2196,8 @@
       final String compoundFileName = docStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION;
 
       try {
-        CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName);
+        Directory dir = getFlushDirectory();
+        CompoundFileWriter cfsWriter = new CompoundFileWriter(dir, compoundFileName);
         final Iterator it = docWriter.closedFiles().iterator();
         while(it.hasNext())
           cfsWriter.addFile((String) it.next());
@@ -2518,7 +2583,12 @@
       throw oom;
     }
   }
 
+  // for test purpose
+  final synchronized SegmentInfos getSegmentInfos() {
+    return segmentInfos;
+  }
+  
   // for test purpose
   final synchronized int getSegmentCount(){
     return segmentInfos.size();
@@ -2870,13 +2940,51 @@
   private final void maybeMerge(boolean optimize) throws CorruptIndexException, IOException {
     maybeMerge(1, optimize);
   }
 
+  long getRamBufferSizeLong() {
+    return (long) (getRAMBufferSizeMB() * 1024 * 1024);
+  }
+  
   private final void maybeMerge(int maxNumSegmentsOptimize, boolean optimize) throws CorruptIndexException, IOException {
-    updatePendingMerges(maxNumSegmentsOptimize, optimize);
+    updatePendingMerges(maxNumSegmentsOptimize, optimize, false);
     mergeScheduler.merge(this);
   }
 
-  private synchronized void updatePendingMerges(int maxNumSegmentsOptimize, boolean optimize)
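+  /**
+   * Decides where pending merges go: while total RAM usage (buffered
+   * docs plus segments already flushed to the RAM dir) stays under the
+   * RAM buffer budget, RAM segments are merged among themselves using
+   * ramMergePolicy; once the budget is exceeded, or forceToDir is set,
+   * the RAM segments are optimized down into the main directory.
+   */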
+  void updatePendingMerges(int maxNumSegmentsOptimize, boolean optimize, boolean forceToDir) throws IOException {
+    SegmentInfos ramInfos = getRamDirSegmentInfos();
+    long ramSegSize = size(ramInfos);
+    long totalRamUsed = ramSegSize + docWriter.getRAMUsed();
+    System.out.println("totalRamUsed:"+totalRamUsed+" rambufsize:"+getRamBufferSizeLong());
+    if (forceToDir || (totalRamUsed > getRamBufferSizeLong() && ramDirectory != null)) {
+      // over budget (or forced): optimize the RAM segments down to the main directory
+      updatePendingMerges(1, true, mergePolicy, ramInfos, directory);
+      updatePendingMerges(maxNumSegmentsOptimize, optimize, mergePolicy, segmentInfos, directory);
+    } else {
+      updatePendingMerges(maxNumSegmentsOptimize, optimize, mergePolicy, segmentInfos, directory);
+      updatePendingMerges(maxNumSegmentsOptimize, optimize, ramMergePolicy, ramInfos, ramDirectory);
+    }
+    if (forceToDir) { // hack: forced merges must be scheduled now rather than by the caller
+      mergeScheduler.merge(this);
+    }
+  }
+  
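+  /** Returns the total size in bytes of all segments in infos. */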
+  static long size(SegmentInfos infos) throws IOException {
+    long size = 0;
+    for (int x=0; x < infos.size(); x++) {
+      size += infos.info(x).sizeInBytes();
+    }
+    return size;
+  }
+  
+  private synchronized void updatePendingMerges(int maxNumSegmentsOptimize, boolean optimize, MergePolicy mergePolicy, SegmentInfos segmentInfos, Directory directory)
     throws CorruptIndexException, IOException {
     assert !optimize || maxNumSegmentsOptimize > 0;
 
@@ -2901,8 +3001,11 @@
 
     if (spec != null) {
       final int numMerges = spec.merges.size();
-      for(int i=0;i<numMerges;i++)
-        registerMerge((MergePolicy.OneMerge) spec.merges.get(i));
+      for(int i=0;i<numMerges;i++) {
+        MergePolicy.OneMerge merge = (MergePolicy.OneMerge) spec.merges.get(i);
+        merge.directory = directory; // set the destination directory
+        registerMerge(merge);
+      }
     }
   }
 
@@ -3528,7 +3631,7 @@
   }
 
   private boolean hasExternalSegments() {
-    return segmentInfos.hasExternalSegments(directory);
+    return segmentInfos.hasExternalSegments(directory, getFlushDirectory());
   }
 
   /* If any of our segments are using a directory != ours
@@ -3958,7 +4061,25 @@
     if (infoStream != null)
       message("commit: done");
   }
 
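+  /** Returns the subset of infos whose segments live in dir. */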
+  public static SegmentInfos getInfosByDir(SegmentInfos infos, Directory dir) throws IOException {
+    SegmentInfos dirInfos = new SegmentInfos();
+    for (int x=0; x < infos.size(); x++) {
+      SegmentInfo info = infos.info(x);
+      if (info.dir == dir) {
+        dirInfos.add(info);
+      }
+    }
+    return dirInfos;
+  }
+  
+  SegmentInfos getRamDirSegmentInfos() throws IOException {
+    if (this.ramDirectory != null) 
+      return getInfosByDir(segmentInfos, ramDirectory);
+    return new SegmentInfos(); // empty when there is no RAM dir, so callers need no null check
+  }
+  
   /**
    * Flush all in-memory buffered udpates (adds and deletes)
    * to the Directory.
@@ -4089,9 +4208,10 @@
         // Create new SegmentInfo, but do not add to our
         // segmentInfos until deletes are flushed
         // successfully.
+        Directory segDir = getFlushDirectory();
         newSegment = new SegmentInfo(segment,
                                      flushedDocCount,
-                                     directory, false, true,
+                                     segDir, false, true,
                                      docStoreOffset, docStoreSegment,
                                      docStoreIsCompoundFile,    
                                      docWriter.hasProx());
@@ -4116,7 +4236,7 @@
         // Now build compound file
         boolean success = false;
         try {
-          docWriter.createCompoundFile(segment);
+          docWriter.createCompoundFile(segment, getFlushDirectory());
           success = true;
         } finally {
           if (!success) {
@@ -4160,19 +4280,22 @@
 
     int first = segmentInfos.indexOf(merge.segments.info(0));
     if (first == -1)
-      throw new MergePolicy.MergeException("could not find segment " + merge.segments.info(0).name + " in current index " + segString(), directory);
+      throw new MergePolicy.MergeException("could not find segment " + merge.segments.info(0).name + " in current index " + segString(), merge.directory);
 
     final int numSegments = segmentInfos.size();
     
     final int numSegmentsToMerge = merge.segments.size();
     for(int i=0;i<numSegmentsToMerge;i++) {
       final SegmentInfo info = merge.segments.info(i);
 
-      if (first + i >= numSegments || !segmentInfos.info(first+i).equals(info)) {
+      boolean n1 = first + i >= numSegments;
+      // n2 must not be evaluated when n1 is true: info(first+i) would be out of bounds
+      boolean n2 = !n1 && !segmentInfos.info(first+i).equals(info);
+      if (n1 || n2) {
         if (segmentInfos.indexOf(info) == -1)
-          throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.name + ") that is not in the current index " + segString(), directory);
+          throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.name + ") that is not in the current index " + segString(), merge.directory);
         else
-          throw new MergePolicy.MergeException("MergePolicy selected non-contiguous segments to merge (" + merge.segString(directory) + " vs " + segString() + "), which IndexWriter (currently) cannot handle",
+          throw new MergePolicy.MergeException("n1: "+n1+" n2:"+n2+" MergePolicy selected non-contiguous segments to merge (" + merge.segString(merge.directory) + " vs " + segString() + "), which IndexWriter (currently) cannot handle",
                                                directory);
       }
     }
@@ -4196,7 +4319,7 @@
     final SegmentInfos sourceSegments = merge.segments;
 
     if (infoStream != null)
-      message("commitMergeDeletes " + merge.segString(directory));
+      message("commitMergeDeletes " + merge.segString(merge.directory));
 
     // Carefully merge deletes that occurred after we
     // started merging:
@@ -4264,7 +4387,7 @@
       return false;
 
     if (infoStream != null)
-      message("commitMerge: " + merge.segString(directory) + " index=" + segString());
+      message("commitMerge: " + merge.segString(merge.directory) + " index=" + segString());
 
     assert merge.registerDone;
 
@@ -4276,7 +4399,7 @@
     // abort this merge
     if (merge.isAborted()) {
       if (infoStream != null)
-        message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");
+        message("commitMerge: skipping merge " + merge.segString(merge.directory) + ": it was aborted");
 
       deleter.refresh(merge.info.name);
       return false;
@@ -4396,8 +4519,9 @@
           // This merge (and, generally, any change to the
           // segments) may now enable new merges, so we call
           // merge policy & update pending merges.
-          if (success && !merge.isAborted() && !closed && !closing)
-            updatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize);
+          if (success && !merge.isAborted() && !closed && !closing) {
+            updatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize, false);
+          }
         }
       }
     } catch (OutOfMemoryError oom) {
@@ -4430,7 +4554,7 @@
         return false;
       if (segmentInfos.indexOf(info) == -1)
         return false;
-      if (info.dir != directory)
+      if (info.dir != directory && info.dir != ramDirectory)
         isExternal = true;
     }
 
@@ -4591,19 +4715,26 @@
     // ConcurrentMergePolicy we keep deterministic segment
     // names.
     merge.info = new SegmentInfo(newSegmentName(), 0,
-                                 directory, false, true,
+                                 (merge.directory != null ? merge.directory : directory), false, true,
                                  docStoreOffset,
                                  docStoreSegment,
                                  docStoreIsCompoundFile,
                                  false);
 
+    if (infoStream != null)
+      message("_mergeInit: " + merge.segString(merge.directory) + " -> " + merge.info.name + " (" + printDir(merge.info.dir) + ")");
+
     // Also enroll the merged segment into mergingSegments;
     // this prevents it from getting selected for a merge
     // after our merge is done but while we are building the
     // CFS:
     mergingSegments.add(merge.info);
   }
 
+  private String printDir(Directory dir) {
+    return (dir == ramDirectory ? "ramdir" : "realdir");
+  }
+  
   /** This is called after merging a segment and before
    *  building its CFS.  Return true if the files should be
    *  sync'd.  If you return false, then the source segment
Index: src/java/org/apache/lucene/index/IndexFileDeleter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexFileDeleter.java	(revision 770396)
+++ src/java/org/apache/lucene/index/IndexFileDeleter.java	(working copy)
@@ -102,6 +102,7 @@
 
   private PrintStream infoStream;
   private Directory directory;
+  private Directory ramDirectory;
   private IndexDeletionPolicy policy;
   private DocumentsWriter docWriter;
 
@@ -129,7 +130,7 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, DocumentsWriter docWriter)
+  public IndexFileDeleter(Directory directory, Directory ramDirectory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, DocumentsWriter docWriter)
     throws CorruptIndexException, IOException {
 
     this.docWriter = docWriter;
@@ -140,6 +141,7 @@
 
     this.policy = policy;
     this.directory = directory;
+    this.ramDirectory = ramDirectory;
 
     // First pass: walk the files and initialize our ref
     // counts:
@@ -185,7 +187,7 @@
               sis = null;
             }
             if (sis != null) {
-              CommitPoint commitPoint = new CommitPoint(commitsToDelete, directory, sis);
+              CommitPoint commitPoint = new CommitPoint(commitsToDelete, directory, ramDirectory, sis);
               if (sis.getGeneration() == segmentInfos.getGeneration()) {
                 currentCommitPoint = commitPoint;
               }
@@ -213,7 +215,7 @@
       }
       if (infoStream != null)
         message("forced open of current segments file " + segmentInfos.getCurrentSegmentFileName());
-      currentCommitPoint = new CommitPoint(commitsToDelete, directory, sis);
+      currentCommitPoint = new CommitPoint(commitsToDelete, directory, ramDirectory, sis);
       commits.add(currentCommitPoint);
       incRef(sis, true);
     }
@@ -395,7 +397,7 @@
 
     if (isCommit) {
       // Append to our commits list:
-      commits.add(new CommitPoint(commitsToDelete, directory, segmentInfos));
+      commits.add(new CommitPoint(commitsToDelete, directory, ramDirectory, segmentInfos));
 
       // Tell policy so it can remove commits:
       policy.onCommit(commits);
@@ -587,7 +589,7 @@
     final boolean isOptimized;
     final String userData;
 
-    public CommitPoint(Collection commitsToDelete, Directory directory, SegmentInfos segmentInfos) throws IOException {
+    public CommitPoint(Collection commitsToDelete, Directory directory, Directory ramDir, SegmentInfos segmentInfos) throws IOException {
       this.directory = directory;
       this.commitsToDelete = commitsToDelete;
       userData = segmentInfos.getUserData();
@@ -598,7 +600,7 @@
       gen = segmentInfos.getGeneration();
       isOptimized = segmentInfos.size() == 1 && !segmentInfos.info(0).hasDeletions();
 
-      assert !segmentInfos.hasExternalSegments(directory);
+      assert !segmentInfos.hasExternalSegments(directory, ramDir);
     }
 
     public boolean isOptimized() {
