Index: src/java/org/apache/lucene/index/DirectoryIndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/DirectoryIndexReader.java	(revision 766772)
+++ src/java/org/apache/lucene/index/DirectoryIndexReader.java	(working copy)
@@ -78,7 +78,11 @@
       synced.addAll(segmentInfos.files(directory, true));
     }
   }
-
+  
+  /** Returns this reader's current SegmentInfos (may be null; see hasSegmentInfos). */
+  SegmentInfos getSegmentInfos() {
+    return segmentInfos;
+  }
+  
   boolean hasSegmentInfos() {
     return segmentInfos != null;
   }
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 766772)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -371,6 +371,8 @@
   final ReaderPool readerPool = new ReaderPool();
   private int upgradeCount;
   
+  private RAMIndex ramIndex;
+  
   // This is a "write once" variable (like the organic dye
   // on a DVD-R that may or may not be heated by a laser and
   // then cooled to permanently record the event): it's
@@ -447,10 +449,36 @@
     // reader; in theory we could do similar retry logic,
     // just like we do when loading segments_N
     synchronized(this) {
-      return new ReadOnlyMultiSegmentReader(this, segmentInfos);
+      ReadOnlyMultiSegmentReader reader = new ReadOnlyMultiSegmentReader(this, segmentInfos);
+      if (ramIndex == null) {
+        return reader;
+      } else {
+        // the ram index exists, so combine its reader with the on-disk reader
+        return new MultiReader(new IndexReader[] {reader, ramIndex.getReader()});
+      }
     }
   }
-
+  
+  /**
+   * Returns the current segment readers, skipping SegmentReaders whose
+   * directory differs from this writer's directory.  Unlike getReader(),
+   * which returns cloned read-only readers, this method returns the actual
+   * underlying segment readers.
+   * @return the underlying SegmentReaders for this writer's directory
+   */
+  SegmentReader[] getSegmentReaders() throws IOException {
+    SegmentInfos sis = segmentInfos;
+    List srs = new ArrayList(sis.size());
+    int numSegments = sis.size();
+    for (int i=0;i<numSegments;i++) {
+      final SegmentInfo info = sis.info(i);
+      SegmentReader sr = readerPool.get(info, true);
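+      // NOTE: readerPool.get increments the reader's ref count; callers of
+      // this method are responsible for releasing the returned readers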
+      if (sr.directory == directory) {
+        srs.add(sr);
+      }
+    }
+    return (SegmentReader[])srs.toArray(new SegmentReader[0]);
+  }
+  
   /** Holds shared SegmentReader instances. IndexWriter uses
    *  SegmentReaders for 1) applying deletes, 2) doing
    *  merges, 3) handing out a real-time reader.  This pool
@@ -482,6 +510,10 @@
       }
     }
     
+    /** Adds an externally created SegmentReader to the pool, if not already present. */
+    synchronized void add(SegmentInfo info, SegmentReader sr) throws IOException {
+      if (!readerMap.containsKey(info))
+        readerMap.put(info, sr);
+    }
+    
     // used only by asserts
     synchronized boolean infoIsLive(SegmentInfo info) {
       int idx = segmentInfos.indexOf(info);
@@ -2375,6 +2407,7 @@
   public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
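+      // forward the delete to the RAM index so RAM-buffered docs are deleted too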
+      if (ramIndex != null) ramIndex.deleteDocument(term);
       boolean doFlush = docWriter.bufferDeleteTerm(term);
       if (doFlush)
         flush(true, false, false);
@@ -2400,6 +2433,7 @@
   public void deleteDocuments(Term[] terms) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
+      if (ramIndex != null) ramIndex.deleteDocument(terms);
       boolean doFlush = docWriter.bufferDeleteTerms(terms);
       if (doFlush)
         flush(true, false, false);
@@ -2422,6 +2456,7 @@
    */
   public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
     ensureOpen();
+    if (ramIndex != null) ramIndex.deleteDocument(query);
     boolean doFlush = docWriter.bufferDeleteQuery(query);
     if (doFlush)
       flush(true, false, false);
@@ -2442,6 +2477,7 @@
    */
   public void deleteDocuments(Query[] queries) throws CorruptIndexException, IOException {
     ensureOpen();
+    if (ramIndex != null) ramIndex.deleteDocument(queries);
     boolean doFlush = docWriter.bufferDeleteQueries(queries);
     if (doFlush)
       flush(true, false, false);
@@ -3526,7 +3562,125 @@
       }
     }
   }
+  
+  /**
+   * Returns a RAMIndex that can be used for near realtime indexing,
+   * creating it on first use.
+   * @see RAMIndex
+   * @throws CorruptIndexException
+   * @throws IOException
+   */
+  public RAMIndex getRamIndex() throws CorruptIndexException, IOException {
+    synchronized (this) {
+      if (ramIndex == null) {
+        ramIndex = new RAMIndex(this);
+      }
+    }
+    return ramIndex;
+  }
+  
+  static private final SegmentReader[] segmentReaderZeroArray = new SegmentReader[0];
+  
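+  /** Recursively collects the leaf (segment-level) readers of r into allSubReaders. */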
+  protected void gatherSubReaders(List allSubReaders, IndexReader r) {
+    IndexReader[] subReaders = r.getSequentialSubReaders();
+    if (subReaders == null) {
+      // Add the reader itself, and do not recurse
+      assert r instanceof SegmentReader;
+      allSubReaders.add(r);
+    } else {
+      for(int i=0;i<subReaders.length;i++) {
+        gatherSubReaders(allSubReaders, subReaders[i]);
+      }
+    }
+  }
+  
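+  /** Flattens the given readers into their underlying SegmentReaders. */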
+  protected SegmentReader[] getSegmentReaders(IndexReader[] readers) {
+    List subReadersList = new ArrayList();
+    for (int x=0; x < readers.length; x++) {
+      gatherSubReaders(subReadersList, readers[x]);
+    }
+    SegmentReader[] segReaders = (SegmentReader[]) subReadersList.toArray(segmentReaderZeroArray);
+    return segReaders;
+  }
+  
+  /**
+   * Similar to addIndexesNoOptimize(Directory[] dirs), but merges the
+   * segments of already-open readers into this index.
+   *
+   * Because of the way merging works in IndexWriter, SegmentInfos must be
+   * obtained from the readers.  The individual SegmentReaders are added to
+   * the readerPool so that when mergeMiddle performs the actual merging it
+   * uses the readers passed into this method (as opposed to loading them
+   * from SegmentInfo.dir).
+   *
+   * NOTE: unlike addIndexesNoOptimize(Directory[] dirs), this method does
+   * not call docWriter.pauseAllThreads.
+   *
+   * @param readers must be DirectoryIndexReaders so that SegmentInfos can
+   *        be obtained from them
+   * @throws CorruptIndexException
+   * @throws IOException
+   */
+  void addIndexesNoOptimize(DirectoryIndexReader[] readers) throws CorruptIndexException, IOException {
+    ensureOpen();
+    // NOTE: unlike the Directory[] variant we do not call
+    // docWriter.pauseAllThreads(); it is unclear whether that
+    // is required for this method
 
+    try {
+      if (infoStream != null)
+        message("flush at addIndexesNoOptimize");
+      flush(true, false, true);
+
+      boolean success = false;
+
+      startTransaction(false);
+
+      try {
+        int docCount = 0;
+        
+        synchronized(this) {
+          ensureOpen();
+          SegmentReader[] segReaders = getSegmentReaders(readers);
+          for (int j = 0; j < segReaders.length; j++) {
+            SegmentInfo info = segReaders[j].getSegmentInfo();
+            assert info != null;
+            if (directory == info.dir) {
+              // cannot add this index: segments may be deleted in merge before added
+              throw new IllegalArgumentException("Cannot add this index to itself");
+            }
+            docCount += info.docCount;
+            segmentInfos.add(info); // add each info
+            readerPool.add(info, segReaders[j]); // register so mergeMiddle reuses this reader
+          }
+        }
+        // Notify DocumentsWriter that the flushed count just increased
+        docWriter.updateFlushedDocCount(docCount);
+        
+        maybeMerge();
+        ensureOpen();
+        
+        // If after merging there remain segments in the index
+        // that are in a different directory, just copy these
+        // over into our index.  This is necessary (before
+        // finishing the transaction) to avoid leaving the
+        // index in an unusable (inconsistent) state.
+        resolveExternalSegments();
+        
+        ensureOpen();
+        
+        success = true;
+      } finally {
+        if (success) {
+          commitTransaction();  // TODO: verify a full transaction is required for this method
+        } else {
+          rollbackTransaction();
+        }
+      }
+    } catch (OutOfMemoryError oom) {
+      hitOOM = true;
+      throw oom;
+    } finally {
+      // if pauseAllThreads is ever enabled above, resume here:
+      //   docWriter.resumeAllThreads();
+    }
+  }
+
   private boolean hasExternalSegments() {
     return segmentInfos.hasExternalSegments(directory);
   }
Index: src/java/org/apache/lucene/index/RAMIndex.java
===================================================================
--- src/java/org/apache/lucene/index/RAMIndex.java	(revision 0)
+++ src/java/org/apache/lucene/index/RAMIndex.java	(revision 0)
@@ -0,0 +1,234 @@
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriter.MaxFieldLength;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.store.RAMDirectory;
+
+/**
+ * A RAM resident index used with IndexWriter for near realtime indexing:
+ * documents and deletes are buffered in an in-memory index that is
+ * periodically flushed into the main on-disk index.
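+ * <p>
+ * A usage sketch (illustrative only; based on the API added in this patch):
+ * <pre>
+ * IndexWriter writer = ...;                 // the main on-disk writer
+ * RAMIndex ram = writer.getRamIndex();
+ * ram.addDocument(doc, analyzer);           // buffered in RAM
+ * IndexReader r = writer.getReader();       // sees the RAM-buffered doc
+ * ram.flush();                              // moves RAM segments to disk
+ * </pre>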
+ */
+public class RAMIndex {
+  /**
+   * Default value is 16 MB (which means the ram index flushes when buffered
+   * docs consume 16 MB RAM). Change using {@link #setMaxRamSize}.
+   */
+  private IndexWriter _ramWriter;
+  private IndexWriter _toDiskWriter;
+  private RAMDirectory ramDirectory;
+  private IndexWriter diskWriter;
+  private long maxSize;
+  private List deleteTerms = new ArrayList();
+  private List deleteQueries = new ArrayList();
+  private Object flushLock = new Object();
+  private Object toDiskWriterLock = new Object();
+  private volatile boolean isFlushing = false;
+
+  public RAMIndex(IndexWriter diskWriter) throws IOException {
+    this.diskWriter = diskWriter;
+    this.maxSize = (long) (DEFAULT_RAM_INDEX_SIZE_MB * 1024 * 1024);
+    init();
+  }
+
+  private IndexWriter toDiskWriter() {
+    synchronized (toDiskWriterLock) {
+      return _toDiskWriter;
+    }
+  }
+
+  private void setToDiskWriter(IndexWriter toDiskWriter) {
+    synchronized (toDiskWriterLock) {
+      _toDiskWriter = toDiskWriter;
+    }
+  }
+
+  public void setMaxRamSize(double mb) {
+    maxSize = (long) (mb * 1024 * 1024);
+  }
+
+  private void addDeleteTerm(Term[] terms) {
+    synchronized (deleteTerms) {
+      for (int x = 0; x < terms.length; x++) {
+        deleteTerms.add(terms[x]);
+      }
+    }
+  }
+
+  private void addDeleteQuery(Query[] queries) {
+    synchronized (deleteQueries) {
+      for (int x = 0; x < queries.length; x++) {
+        deleteQueries.add(queries[x]);
+      }
+    }
+  }
+
+  /**
+   * Returns the ram writer, e.g. to configure its merge scheduler or
+   * merge factor.
+   * @return the IndexWriter backing the RAM index
+   */
+  public synchronized IndexWriter getRamWriter() {
+    return _ramWriter;
+  }
+
+  /**
+   * Returns a reader over the RAM index: a composite of the readers of the
+   * current ramWriter and of any writer currently being flushed to disk.
+   */
+  public synchronized IndexReader getReader() throws IOException {
+    applyDeletes(); // TODO: verify it is ok that this also applies deletes to the diskWriter
+    List readers = new ArrayList();
+    IndexWriter toDisk = toDiskWriter();
+    if (toDisk != null) {
+      SegmentReader[] rs = toDisk.getSegmentReaders();
+      for (int x = 0; x < rs.length; x++) {
+        readers.add(rs[x]);
+      }
+    }
+    SegmentReader[] ramSrs = getRamWriter().getSegmentReaders();
+    for (int x = 0; x < ramSrs.length; x++) {
+      readers.add(ramSrs[x]);
+    }
+    if (readers.size() == 1) {
+      return (IndexReader) readers.get(0);
+    }
+    if (readers.size() > 1) {
+      IndexReader[] array = (IndexReader[]) readers.toArray(new IndexReader[0]);
+      return new MultiReader(array);
+    }
+    throw new IllegalStateException("no segment readers available (empty RAM index?)");
+  }
+
+  private void applyDeletes() throws IOException {
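+    // apply buffered deletes everywhere a matching doc may live: the
+    // writer being flushed (if any), the current ram writer, and the
+    // main disk writer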
+    if (_toDiskWriter != null) {
+      for (int x = 0; x < deleteTerms.size(); x++) {
+        Term term = (Term) deleteTerms.get(x);
+        _toDiskWriter.deleteDocuments(term);
+      }
+      for (int x = 0; x < deleteQueries.size(); x++) {
+        Query query = (Query) deleteQueries.get(x);
+        _toDiskWriter.deleteDocuments(query);
+      }
+      // flush the deletes to the writer so they show up
+      // in the reader that is being copied to disk
+      // when diskWriter.commitMergeDeletes is called
+      // the deletes will be copied to the new segment
+      _toDiskWriter.flush(false, false, true);
+    }
+    assert _ramWriter != null;
+    for (int x = 0; x < deleteTerms.size(); x++) {
+      Term term = (Term) deleteTerms.get(x);
+      _ramWriter.deleteDocuments(term);
+    }
+    for (int x = 0; x < deleteQueries.size(); x++) {
+      Query query = (Query) deleteQueries.get(x);
+      _ramWriter.deleteDocuments(query);
+    }
+    for (int x = 0; x < deleteTerms.size(); x++) {
+      Term term = (Term) deleteTerms.get(x);
+      diskWriter.deleteDocuments(term);
+    }
+    for (int x = 0; x < deleteQueries.size(); x++) {
+      Query query = (Query) deleteQueries.get(x);
+      diskWriter.deleteDocuments(query);
+    }
+    deleteTerms.clear();
+    deleteQueries.clear();
+  }
+
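+  /**
+   * Creates a fresh ram writer over a new RAMDirectory, carrying over the
+   * tunable settings from the previous ram writer if one exists.
+   */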
+  private synchronized void init() throws IOException {
+    IndexWriter oldWriter = _ramWriter;
+    ramDirectory = new RAMDirectory();
+    IndexWriter newWriter = new IndexWriter(ramDirectory, diskWriter.getAnalyzer(), true,
+        new MaxFieldLength(diskWriter.getMaxFieldLength()));
+    if (oldWriter != null) {
+      newWriter.setMergeScheduler(oldWriter.getMergeScheduler());
+      newWriter.setMergeFactor(oldWriter.getMergeFactor());
+      newWriter.setMaxBufferedDeleteTerms(oldWriter.getMaxBufferedDeleteTerms());
+      newWriter.setMaxFieldLength(oldWriter.getMaxFieldLength());
+      newWriter.setMaxMergeDocs(oldWriter.getMaxMergeDocs());
+      // TODO: carry over the merged segment warmer from oldWriter
+      newWriter.setInfoStream(oldWriter.getInfoStream());
+      newWriter.setTermIndexInterval(oldWriter.getTermIndexInterval());
+      newWriter.setUseCompoundFile(oldWriter.getUseCompoundFile());
+      newWriter.setSimilarity(oldWriter.getSimilarity());
+    }
+    _ramWriter = newWriter;
+  }
+
+  public void addDocument(Document document, Analyzer analyzer) throws CorruptIndexException, IOException {
+    getRamWriter().addDocument(document, analyzer);
+    ifFlush();
+  }
+
+  public void updateDocument(Term term, Document document, Analyzer analyzer) throws CorruptIndexException, IOException {
+    // apply the delete before adding the replacement, otherwise a later
+    // applyDeletes would delete the new document as well
+    addDeleteTerm(new Term[] { term });
+    applyDeletes();
+    getRamWriter().addDocument(document, analyzer);
+    ifFlush();
+  }
+
+  public void deleteDocument(Query query) throws CorruptIndexException, IOException {
+    addDeleteQuery(new Query[] { query });
+    ifFlushDeletes();
+  }
+
+  public void deleteDocument(Term term) throws CorruptIndexException, IOException {
+    addDeleteTerm(new Term[] { term });
+    ifFlushDeletes();
+  }
+
+  public void deleteDocument(Query[] queries) throws CorruptIndexException, IOException {
+    addDeleteQuery(queries);
+    ifFlushDeletes();
+  }
+
+  public void deleteDocument(Term[] terms) throws CorruptIndexException, IOException {
+    addDeleteTerm(terms);
+    ifFlushDeletes();
+  }
+
+  private void ifFlushDeletes() throws CorruptIndexException, IOException {
+    // if there's a toDiskWriter, apply the deletes
+    // so they are applied to the readers being copied to disk
+    if (toDiskWriter() != null) {
+      applyDeletes();
+    }
+  }
+
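+  /** Flushes the RAM index to disk once its total RAM footprint reaches maxSize. */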
+  private void ifFlush() throws CorruptIndexException, IOException {
+    ifFlushDeletes();
+    if (ramDirectory.sizeInBytes() + getRamWriter().ramSizeInBytes() >= maxSize) {
+      flush();
+    }
+  }
+
+  /**
+   * Flushes the ramWriter to the underlying writer.
+   * 
+   * @throws CorruptIndexException
+   * @throws IOException
+   */
+  public void flush() throws CorruptIndexException, IOException {
+    if (isFlushing)
+      return;
+    synchronized (flushLock) {
+      isFlushing = true;
+      try {
+        applyDeletes();
+        // NOTE: this cast assumes getReader() returns a DirectoryIndexReader
+        // (e.g. a single SegmentReader); a MultiReader would fail here
+        DirectoryIndexReader ramReader = (DirectoryIndexReader) getReader();
+        setToDiskWriter(getRamWriter());
+        init(); // create a new ram writer
+        diskWriter.addIndexesNoOptimize(new DirectoryIndexReader[] { ramReader });
+      } finally {
+        // always clear the flush state, even if the flush failed
+        setToDiskWriter(null);
+        isFlushing = false;
+      }
+    }
+  }
+}
Index: src/test/org/apache/lucene/index/TestIndexWriterReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterReader.java	(revision 766772)
+++ src/test/org/apache/lucene/index/TestIndexWriterReader.java	(working copy)
@@ -521,10 +521,15 @@
    */
   public static void createIndex(Directory dir1, String indexName,
       boolean multiSegment) throws IOException {
+    createIndex(dir1, indexName, multiSegment, 100);
+  }
+  
+  public static void createIndex(Directory dir1, String indexName,
+      boolean multiSegment, int num) throws IOException {
     IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(),
         IndexWriter.MaxFieldLength.LIMITED);
     w.setMergePolicy(new LogDocMergePolicy());
-    for (int i = 0; i < 100; i++) {
+    for (int i = 0; i < num; i++) {
       w.addDocument(createDocument(i, indexName, 4));
       if (multiSegment && (i % 10) == 0) {
       }
@@ -534,11 +539,16 @@
     }
     w.close();
   }
-
+  
   public static void createIndexNoClose(boolean multiSegment, String indexName,
       IndexWriter w) throws IOException {
+    createIndexNoClose(multiSegment, indexName, w, 100);
+  }
+  
+  public static void createIndexNoClose(boolean multiSegment, String indexName,
+      IndexWriter w, int num) throws IOException {
     w.setMergePolicy(new LogDocMergePolicy());
-    for (int i = 0; i < 100; i++) {
+    for (int i = 0; i < num; i++) {
       w.addDocument(createDocument(i, indexName, 4));
     }
     if (!multiSegment) {
Index: src/test/org/apache/lucene/index/TestIndexWriterRealtime.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterRealtime.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestIndexWriterRealtime.java	(revision 0)
@@ -0,0 +1,71 @@
+package org.apache.lucene.index;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestIndexWriterRealtime extends LuceneTestCase {
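+  /** Verifies docs added to the RAMIndex are visible via IndexWriter.getReader(). */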
+  public void testRamIndex() throws Exception {
+    int numDocs = 100;
+
+    Directory dir = new MockRAMDirectory();
+    IndexWriter dirWriter = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    TestIndexWriterReader.createIndexNoClose(false, "main", dirWriter, numDocs);
+
+    RAMIndex ramIndex = dirWriter.getRamIndex();
+    TestIndexWriterReader.createIndexNoClose(false, "ramindex", ramIndex
+        .getRamWriter(), numDocs);
+
+    IndexReader reader = dirWriter.getReader();
+    assertEquals(numDocs * 2, reader.maxDoc());
+
+    ramIndex.flush();
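+    // the reader was opened before the flush, so it still sees all docs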
+    assertEquals(numDocs * 2, reader.maxDoc());
+
+    reader.close();
+    dirWriter.close();
+
+    dir.close();
+  }
+
+  /**
+   * Tests IndexWriter.addIndexesNoOptimize(DirectoryIndexReader[] readers)
+   * 
+   * @throws Throwable
+   */
+  public void testAddIndexes() throws Throwable {
+    Directory ramDir1 = new MockRAMDirectory();
+    int numDocs = 100;
+    TestIndexWriterReader.createIndex(ramDir1, "ramindex1", false, numDocs);
+    DirectoryIndexReader ramReader1 = (DirectoryIndexReader) IndexReader
+        .open(ramDir1);
+
+    Directory ramDir2 = new MockRAMDirectory();
+    IndexWriter ramWriter2 = new IndexWriter(ramDir2, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    TestIndexWriterReader.createIndexNoClose(false, "ramindex2", ramWriter2,
+        numDocs);
+    ramWriter2.close();
+    DirectoryIndexReader ramReader2 = (DirectoryIndexReader) IndexReader
+        .open(ramDir2);
+
+    Directory dir1 = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    // TODO: also add ramReader2 once adding multiple readers is verified
+    writer.addIndexesNoOptimize(new DirectoryIndexReader[] { ramReader1 });
+    writer.commit();
+    IndexReader rw1 = writer.getReader();
+    assertEquals(numDocs, rw1.maxDoc());
+    rw1.close();
+    ramReader2.close();
+    ramReader1.close();
+
+    writer.close();
+
+    ramDir1.close();
+    ramDir2.close();
+    dir1.close();
+  }
+}
