Index: src/java/org/apache/lucene/index/DirectoryReader.java
===================================================================
--- src/java/org/apache/lucene/index/DirectoryReader.java	(revision 796572)
+++ src/java/org/apache/lucene/index/DirectoryReader.java	(working copy)
@@ -143,9 +143,9 @@
       boolean success = false;
       try {
         final SegmentInfo info = infos.info(upto);
-        if (info.dir == dir) {
+        //if (info.dir == dir) {
           readers[upto++] = writer.readerPool.getReadOnlyClone(info, true, termInfosIndexDivisor);
-        }
+        //}
         success = true;
       } finally {
         if (!success) {
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 796572)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -326,7 +326,7 @@
   private int localFlushedDocCount;               // saved docWriter.getFlushedDocCount during local transaction
   private boolean autoCommit = true;              // false if we should commit only on close
 
-  private SegmentInfos segmentInfos = new SegmentInfos();       // the segments
+  SegmentInfos segmentInfos = new SegmentInfos();       // the segments
 
   private DocumentsWriter docWriter;
   private IndexFileDeleter deleter;
@@ -2148,7 +2148,17 @@
       return false;
     }
   }
-
+
+  private void printAllFiles(String name) throws IOException {
+    Collection files = segmentInfos.files(directory, true);
+    Iterator it = files.iterator();
+    System.out.println(name);
+    while (it.hasNext()) {
+      String f = (String)it.next();
+      System.out.println(" "+f);
+    }
+  }
+
   private void closeInternal(boolean waitForMerges) throws CorruptIndexException, IOException {
 
     docWriter.pauseAllThreads();
@@ -2164,7 +2174,9 @@
       if (!hitOOM) {
         flush(waitForMerges, true, true);
       }
-
+      printAllFiles("beforeResolve");
+      resolveExternalSegments();
+      printAllFiles("afterResolve");
       if (waitForMerges)
         // Give merge scheduler last chance to run, in case
         // any pending merges are waiting:
@@ -3706,7 +3718,69 @@
         }
       }
     }
+
+  /**
+   * Asynchronously copies the indexes in the given directories to this
+   * directory by allowing the natural merge process to occur.
+   * Whereas the other addIndexes* methods do not allow documents to
+   * be added or deleted while it is proceeding, this method does.
+   * Also getReader may be called immediately after and will include
+   * the added indexes.
+   *
+   * @param dirs
+   * @param copy When true, the segments are copied into this directory
+   * @throws CorruptIndexException
+   * @throws IOException
+   */
+  public void addIndexesAsync(Directory[] dirs, boolean copy) throws CorruptIndexException, IOException {
+    ensureOpen();
+    noDupDirs(dirs);
+    if (infoStream != null)
+      message("flush at addIndexesNoCommit");
+    boolean success = false;
+
+    try {
+      int docCount = 0;
+      // synchronize here because we're altering segmentInfos
+      synchronized(this) {
+        ensureOpen();
+
+        for (int i = 0; i < dirs.length; i++) {
+          if (directory == dirs[i]) {
+            // cannot add this index: segments may be deleted in merge before added
+            throw new IllegalArgumentException("Cannot add this index to itself");
+          }
+
+          SegmentInfos sis = new SegmentInfos(); // read infos from dir
+          sis.read(dirs[i]);
+          for (int j = 0; j < sis.size(); j++) {
+            SegmentInfo info = sis.info(j);
+            assert !segmentInfos.contains(info): "dup info dir=" + info.dir + " name=" + info.name;
+            docCount += info.docCount;
+            segmentInfos.add(info); // add each info
+          }
+        }
+      }
+
+      // Notify DocumentsWriter that the flushed count just increased
+      docWriter.updateFlushedDocCount(docCount);
+
+      maybeMerge();
+
+      ensureOpen();
+      // when copy is true, insure there are no more
+      // segments from the source directories
+      if (copy) resolveExternalSegments();
+
+      ensureOpen();
+
+      success = true;
+    } catch (OutOfMemoryError oom) {
+      handleOOM(oom, "addIndexesNoCommit");
+    }
+  }
+
   private boolean hasExternalSegments() {
     return segmentInfos.hasExternalSegments(directory);
   }
@@ -5352,7 +5426,9 @@
 
       if (sizeInBytes > 0)
         syncPause(sizeInBytes);
-
+
+      if (!stopMerges) resolveExternalSegments();
+
      SegmentInfos toSync = null;
      final long myChangeCount;
 
Index: src/test/org/apache/lucene/index/TestAddIndexesAsync.java
===================================================================
--- src/test/org/apache/lucene/index/TestAddIndexesAsync.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestAddIndexesAsync.java	(revision 0)
@@ -0,0 +1,67 @@
+package org.apache.lucene.index;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestAddIndexesAsync extends LuceneTestCase {
+  public void testCopy() throws Exception {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    writer.setMergePolicy(new LogDocMergePolicy());
+
+    TestAddIndexesNoOptimize.addDocs(writer, 200);
+
+    Directory addDir = new MockRAMDirectory();
+    TestIndexWriterReader.createIndex(addDir, "beingAdded", true);
+
+    writer.addIndexesAsync(new Directory[] { addDir }, true);
+
+    synchronized (writer.segmentInfos) {
+      for (int x = 0; x < writer.segmentInfos.size(); x++) {
+        SegmentInfo info = writer.segmentInfos.info(x);
+        // make sure all of the infos are from our main dir
+        assertTrue(info.dir == dir);
+      }
+    }
+    IndexReader reader = writer.getReader();
+    assertEquals(300, reader.numDocs());
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+
+  public void testNoCopy() throws Exception {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setInfoStream(System.out);
+
+    TestAddIndexesNoOptimize.addDocs(writer, 200);
+
+    Directory addDir = new MockRAMDirectory();
+    TestIndexWriterReader.createIndex(addDir, "beingAdded", true);
+
+    writer.addIndexesAsync(new Directory[] { addDir }, false);
+
+    int extDirCount = 0;
+
+    synchronized (writer.segmentInfos) {
+      for (int x = 0; x < writer.segmentInfos.size(); x++) {
+        SegmentInfo info = writer.segmentInfos.info(x);
+        // make sure all of the infos are from our main dir
+        if (info.dir == addDir)
+          extDirCount++;
+      }
+    }
+
+    assertTrue(extDirCount > 0);
+
+    IndexReader reader = writer.getReader();
+    assertEquals(300, reader.numDocs());
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+}
Index: src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
===================================================================
--- src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java	(revision 796572)
+++ src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java	(working copy)
@@ -430,7 +430,7 @@
     return writer;
   }
 
-  private void addDocs(IndexWriter writer, int numDocs) throws IOException {
+  public static void addDocs(IndexWriter writer, int numDocs) throws IOException {
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      doc.add(new Field("content", "aaa", Field.Store.NO,