Index: src/test/org/apache/lucene/index/TestIndexWriterRealtime.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterRealtime.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestIndexWriterRealtime.java	(revision 0)
@@ -0,0 +1,81 @@
+package org.apache.lucene.index;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestIndexWriterRealtime extends LuceneTestCase {
+
+  /**
+   * Verifies that documents buffered in the RAMIndex are visible through
+   * IndexWriter.getReader(), and that a point-in-time reader is unchanged
+   * by a subsequent RAMIndex.flush().
+   */
+  public void testRamIndex() throws Exception {
+    int numDocs = 100;
+
+    Directory dir = new MockRAMDirectory();
+    IndexWriter dirWriter = new IndexWriter(dir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    TestIndexWriterReader.createIndexNoClose(false, "main", dirWriter, numDocs);
+
+    RAMIndex ramIndex = dirWriter.getRamIndex();
+    TestIndexWriterReader.createIndexNoClose(false, "ramindex", ramIndex
+        .getRamWriter(), numDocs);
+
+    // getReader() must expose both the on-disk docs and the ram-buffered docs.
+    IndexReader reader = dirWriter.getReader();
+    assertEquals(numDocs * 2, reader.maxDoc());
+
+    // Flushing the ram index moves docs to disk; the already-open
+    // point-in-time reader must still see the same total.
+    ramIndex.flush();
+    assertEquals(numDocs * 2, reader.maxDoc());
+
+    reader.close();
+    dirWriter.close();
+
+    dir.close();
+  }
+
+  /**
+   * Tests IndexWriter.addIndexesNoOptimize(DirectoryIndexReader[] readers)
+   * 
+   * @throws Throwable
+   */
+  public void testAddIndexes() throws Throwable {
+    Directory ramDir1 = new MockRAMDirectory();
+    int numDocs = 100;
+    TestIndexWriterReader.createIndex(ramDir1, "ramindex1", false, numDocs);
+    DirectoryIndexReader ramReader1 = (DirectoryIndexReader) IndexReader
+        .open(ramDir1);
+
+    Directory ramDir2 = new MockRAMDirectory();
+    IndexWriter ramWriter2 = new IndexWriter(ramDir2, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    TestIndexWriterReader.createIndexNoClose(false, "ramindex2", ramWriter2,
+        numDocs);
+    ramWriter2.close();
+    DirectoryIndexReader ramReader2 = (DirectoryIndexReader) IndexReader
+        .open(ramDir2);
+
+    Directory dir1 = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.addIndexesNoOptimize(new DirectoryIndexReader[] { ramReader1 }); // ramReader2});
+    writer.commit();
+    IndexReader rw1 = writer.getReader();
+    // JUnit convention: expected value first.
+    assertEquals(numDocs, rw1.maxDoc());
+    rw1.close();
+    ramReader2.close();
+    ramReader1.close();
+
+    writer.close();
+
+    ramDir1.close();
+    ramDir2.close();
+    dir1.close();
+  }
+}

Property changes on: src/test/org/apache/lucene/index/TestIndexWriterRealtime.java
___________________________________________________________________
Name: svn:mime-type
   + text/plain
Name: svn:keywords
   + "Date Rev Author URL Id"
Name: svn:eol-style
   + native

Index: src/test/org/apache/lucene/index/TestIndexWriterReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterReader.java	(revision 766128)
+++ src/test/org/apache/lucene/index/TestIndexWriterReader.java	(working copy)
@@ -521,10 +521,15 @@
    */
   public static void createIndex(Directory dir1, String indexName,
       boolean multiSegment) throws IOException {
+    createIndex(dir1, indexName, multiSegment, 100);
+  }
+  
+  public static void createIndex(Directory dir1, String indexName,
+      boolean multiSegment, int num) throws IOException {
     IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(),
         IndexWriter.MaxFieldLength.LIMITED);
     w.setMergePolicy(new LogDocMergePolicy());
-    for (int i = 0; i < 100; i++) {
+    for (int i = 0; i < num; i++) {
       w.addDocument(createDocument(i, indexName, 4));
       if (multiSegment && (i % 10) == 0) {
       }
@@ -534,11 +539,16 @@
     }
     w.close();
   }
-
+  
   public static void createIndexNoClose(boolean multiSegment, String indexName,
       IndexWriter w) throws IOException {
+    createIndexNoClose(multiSegment, indexName, w, 100);
+  }
+  
+  public static void createIndexNoClose(boolean multiSegment, String indexName,
+      IndexWriter w, int num) throws IOException {
     w.setMergePolicy(new LogDocMergePolicy());
-    for (int i = 0; i < 100; i++) {
+    for (int i = 0; i < num; i++) {
       w.addDocument(createDocument(i, indexName, 4));
     }
     if (!multiSegment) {
Index: src/java/org/apache/lucene/index/DirectoryIndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/DirectoryIndexReader.java	(revision 766128)
+++ src/java/org/apache/lucene/index/DirectoryIndexReader.java	(working copy)
@@ -78,7 +78,11 @@
       synced.addAll(segmentInfos.files(directory, true));
     }
   }
-
+  
+  SegmentInfos getSegmentInfos() {
+    return segmentInfos;
+  }
+  
   boolean hasSegmentInfos() {
     return segmentInfos != null;
   }
Index: src/java/org/apache/lucene/index/RAMIndex.java
===================================================================
--- src/java/org/apache/lucene/index/RAMIndex.java	(revision 0)
+++ src/java/org/apache/lucene/index/RAMIndex.java	(revision 0)
@@ -0,0 +1,105 @@
+package org.apache.lucene.index;
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriter.MaxFieldLength;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.store.RAMDirectory;
+
+/**
+ * RAM Index for use with IndexWriter.  Buffers added documents in an
+ * in-memory index and flushes them into the wrapped on-disk writer once
+ * the configured RAM budget is exceeded.
+ */
+public class RAMIndex {
+  /**
+   * Default value is 16 MB (which means the ram index flushes when buffered
+   * docs consume 16 MB RAM).  Change using {@link #setMaxRamSize}.
+   */
+  public final static double DEFAULT_RAM_INDEX_SIZE_MB = 16.0;
+  private IndexWriter _ramWriter;
+  private RAMDirectory ramDirectory;
+  private IndexWriter diskWriter;
+  private long maxSize;
+  
+  public RAMIndex(IndexWriter diskWriter) throws IOException {
+    this.diskWriter = diskWriter;
+    this.maxSize = (long)DEFAULT_RAM_INDEX_SIZE_MB*1024*1024;
+    init();
+  }
+  
+  /**
+   * Sets the RAM budget (in MB) above which buffered docs are flushed.
+   */
+  public void setMaxRamSize(double mb) {
+    // Use the caller-supplied size; previously the argument was ignored
+    // and the default was always reinstated.
+    maxSize = (long)(mb*1024*1024);
+  }
+  
+  /**
+   * Get the ram writer to set the merge scheduler or merge factor.
+   * @return the IndexWriter over the internal RAMDirectory
+   */
+  public synchronized IndexWriter getRamWriter() {
+    return _ramWriter;
+  }
+  
+  public IndexReader getReader() throws IOException {
+    return getRamWriter().getReader();
+  }
+  
+  // (Re)creates the in-memory directory and writer; called at construction
+  // and after every flush.
+  private synchronized void init() throws IOException {
+    ramDirectory = new RAMDirectory();
+    _ramWriter = new IndexWriter(ramDirectory, diskWriter.getAnalyzer(),
+        true, new MaxFieldLength(diskWriter.getMaxFieldLength()));
+  }
+  
+  public void addDocument(Document document, Analyzer analyzer)
+      throws CorruptIndexException, IOException {
+    getRamWriter().addDocument(document, analyzer);
+    ifFlush();
+  }
+  
+  public void updateDocument(Term term, Document document, Analyzer analyzer) throws CorruptIndexException, IOException {
+    getRamWriter().updateDocument(term, document, analyzer);
+    // Also delete from disk so a stale on-disk copy cannot survive the update.
+    diskWriter.deleteDocuments(term);
+    ifFlush();
+  }
+  
+  public void deleteDocument(Query[] queries) throws CorruptIndexException, IOException {
+    getRamWriter().deleteDocuments(queries);
+    diskWriter.deleteDocuments(queries);
+  }
+  
+  public void deleteDocument(Term[] terms) throws CorruptIndexException, IOException {
+    getRamWriter().deleteDocuments(terms);
+    diskWriter.deleteDocuments(terms);
+  }
+  
+  // Flush when the completed in-memory segments plus the ram writer's
+  // buffered docs exceed the budget.
+  private void ifFlush() throws CorruptIndexException, IOException {
+    if (ramDirectory.sizeInBytes()+getRamWriter().ramSizeInBytes() >= maxSize) {
+      flush();
+    }
+  }
+  
+  /**
+   * Flushes the ramWriter to the underlying writer.
+   * NOTE(review): the old ram writer and reader are never closed and the old
+   * RAMDirectory is only reclaimed by GC -- confirm this is intended.
+   * @throws CorruptIndexException
+   * @throws IOException
+   */
+  public synchronized void flush() throws CorruptIndexException, IOException {
+    DirectoryIndexReader ramReader = (DirectoryIndexReader)getRamWriter().getReader();
+    diskWriter.addIndexesNoOptimize(new DirectoryIndexReader[] {ramReader});
+    init();
+  }
+}

Property changes on: src/java/org/apache/lucene/index/RAMIndex.java
___________________________________________________________________
Name: svn:mime-type
   + text/plain
Name: svn:keywords
   + "Date Rev Author URL Id"
Name: svn:eol-style
   + native

Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 766128)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -371,6 +371,8 @@
   final ReaderPool readerPool = new ReaderPool();
   private int upgradeCount;
   
+  private RAMIndex ramIndex;
+  
   // This is a "write once" variable (like the organic dye
   // on a DVD-R that may or may not be heated by a laser and
   // then cooled to permanently record the event): it's
@@ -447,7 +449,15 @@
      // reader; in theory we could do similar retry logic,
      // just like we do when loading segments_N
     synchronized(this) {
-      return new ReadOnlyMultiSegmentReader(this, segmentInfos);
+      ReadOnlyMultiSegmentReader reader = new ReadOnlyMultiSegmentReader(this, segmentInfos);
+      if (ramIndex == null) {
+        return reader;
+      } else {
+        // if the ram index exists then add it to the readers
+        // NOTE(review): MultiReader closes its sub-readers on close, and each
+        // call opens a fresh ram reader -- confirm this lifecycle is intended.
+        return new MultiReader(new IndexReader[] {reader, ramIndex.getReader()});
+      }
     }
   }
 
@@ -482,6 +490,10 @@
       }
     }
     
+    synchronized void add(SegmentInfo info, SegmentReader sr) throws IOException {
+      if (!readerMap.containsKey(info)) readerMap.put(info, sr);
+    }
+    
     // used only by asserts
     synchronized boolean infoIsLive(SegmentInfo info) {
       int idx = segmentInfos.indexOf(info);
@@ -1736,7 +1748,7 @@
     if (infoStream != null)
       message("setRAMBufferSizeMB " + mb);
   }
-
+  
   /**
    * Returns the value set by {@link #setRAMBufferSizeMB} if enabled.
    */
@@ -3526,7 +3538,129 @@
       }
     }
   }
+  
+  /**
+   * Returns a RAMIndex that can be used to near realtime indexing.
+   * @throws CorruptIndexException
+   * @throws IOException
+   */
+  public RAMIndex getRamIndex() throws CorruptIndexException, IOException {
+    synchronized (this) {
+      if (ramIndex == null) {
+        ramIndex = new RAMIndex(this);
+      }
+    }
+    return ramIndex;
+  }
+  
+  static private final SegmentReader[] indexReaderZeroArray = new SegmentReader[0];
+  
+  // Recursively collects the leaf SegmentReaders of r into allSubReaders.
+  protected void gatherSubReaders(List allSubReaders, IndexReader r) {
+    IndexReader[] subReaders = r.getSequentialSubReaders();
+    if (subReaders == null) {
+      // Add the reader itself, and do not recurse
+      assert r instanceof SegmentReader;
+      allSubReaders.add(r);
+    } else {
+      for(int i=0;i<subReaders.length;i++) {
+        gatherSubReaders(allSubReaders, subReaders[i]);
+      }
+    }
+  }
+  
+  // Flattens the given readers into the SegmentReaders they are composed of.
+  protected SegmentReader[] getSegmentReaders(IndexReader[] readers) {
+    List subReadersList = new ArrayList();
+    for (int x=0; x < readers.length; x++) {
+      gatherSubReaders(subReadersList, readers[x]);
+    }
+    SegmentReader[] segReaders = (SegmentReader[]) subReadersList.toArray(indexReaderZeroArray);
+    return segReaders;
+  }
+  
+  /**
+   * Similar to addIndexesNoOptimize(Directory[] dirs).  
+   * 
+   * Because of the way merging works in IndexWriter, SegmentInfos must be obtained from the readers.
+   * Individual SegmentReaders are obtained and added to the readerPool so 
+   * that when mergeMiddle performs the actual merging, it obtains the readers 
+   * passed into this method (as opposed to loading them from SegmentInfo.dir)
+   * 
+   * NOTE: Unlike addIndexesNoOptimize(Directory[] dirs) docWriter.pauseAllThreads isn't used
+   * 
+   * @param readers Need to be DirectoryIndexReaders so that infos can be obtained from them
+   * @throws CorruptIndexException
+   * @throws IOException
+   */
+  void addIndexesNoOptimize(DirectoryIndexReader[] readers) throws CorruptIndexException, IOException {
+    ensureOpen();
+    // Do not allow add docs or deletes while we are running:
+    //docWriter.pauseAllThreads(); // Not sure this is required in this
 
+    try {
+      if (infoStream != null)
+        message("flush at addIndexesNoOptimize");
+      flush(true, false, true);
+
+      boolean success = false;
+
+      startTransaction(false);
+
+      try {
+        int docCount = 0;
+        
+        synchronized(this) {
+          ensureOpen();
+          SegmentReader[] segReaders = getSegmentReaders(readers);
+          for (int j = 0; j < segReaders.length; j++) {
+            SegmentInfo info = segReaders[j].getSegmentInfo();
+            assert info != null;
+            if (directory == info.dir) {
+              // cannot add this index: segments may be deleted in merge before added
+              throw new IllegalArgumentException("Cannot add this index to itself");
+            }
+            docCount += info.docCount;
+            segmentInfos.add(info); // add each info
+            // Pool the incoming reader so mergeMiddle reuses it rather than
+            // re-opening from info.dir.  (Was previously added twice, once
+            // before the add-to-self check above could throw.)
+            readerPool.add(info, segReaders[j]);
+          }
+        }
+        // Notify DocumentsWriter that the flushed count just increased
+        docWriter.updateFlushedDocCount(docCount);
+        
+        maybeMerge();
+        ensureOpen();
+        
+        // If after merging there remain segments in the index
+        // that are in a different directory, just copy these
+        // over into our index.  This is necessary (before
+        // finishing the transaction) to avoid leaving the
+        // index in an unusable (inconsistent) state.
+        resolveExternalSegments();
+        
+        ensureOpen();
+        
+        success = true;
+      } finally {
+        if (success) {
+          commitTransaction();  // is this required for this method?
+        } else {
+          rollbackTransaction();
+        }
+      }
+    } catch (OutOfMemoryError oom) {
+      hitOOM = true;
+      throw oom;
+    } finally {
+      //if (docWriter != null) {
+      //  docWriter.resumeAllThreads();
+      //}
+    }
+  }
+
   private boolean hasExternalSegments() {
     return segmentInfos.hasExternalSegments(directory);
   }
