Index: src/java/org/apache/lucene/index/FieldsReader.java
===================================================================
--- src/java/org/apache/lucene/index/FieldsReader.java	(revision 494380)
+++ src/java/org/apache/lucene/index/FieldsReader.java	(working copy)
@@ -46,6 +46,7 @@
     indexStream = d.openInput(segment + ".fdx");
 
     size = (int)(indexStream.length() / 8);
+    SegmentMerger.message("FieldsReader size: file " + segment + ".fdx has length " + indexStream.length() + " = size " + size);
   }
 
   final void close() throws IOException {
Index: src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/IndexReader.java	(revision 494380)
+++ src/java/org/apache/lucene/index/IndexReader.java	(working copy)
@@ -526,8 +526,8 @@
   public final synchronized void deleteDocument(int docNum) throws IOException {
     if(directoryOwner)
       aquireWriteLock();
+    hasChanges = true;
     doDelete(docNum);
-    hasChanges = true;
   }
 
Index: src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentMerger.java	(revision 494380)
+++ src/java/org/apache/lucene/index/SegmentMerger.java	(working copy)
@@ -20,6 +20,7 @@
 import java.util.Iterator;
 import java.util.Collection;
 import java.io.IOException;
+import java.io.PrintStream;
 
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexOutput;
@@ -181,11 +182,20 @@
       for (int i = 0; i < readers.size(); i++) {
         IndexReader reader = (IndexReader) readers.elementAt(i);
         int maxDoc = reader.maxDoc();
+        int startDocCount = docCount;
         for (int j = 0; j < maxDoc; j++)
           if (!reader.isDeleted(j)) {           // skip deleted docs
             fieldsWriter.addDocument(reader.document(j));
             docCount++;
           }
+        if (docCount - startDocCount != reader.numDocs()) {
+          String s = "!!ERROR!!: docCount was not correct\n";
+          s += " reader " + (1+i) + " of " + readers.size() + "\n";
+          s += " maxDoc = " + reader.maxDoc() + "\n";
+          s += " numDoc = " + reader.numDocs() + "\n";
+          s += " but actual num docs written = " + (docCount-startDocCount);
+          message(s);
+        }
       }
     } finally {
       fieldsWriter.close();
@@ -306,6 +316,20 @@
     }
   }
 
+  static PrintStream infoStream = System.err;
+
+  public static PrintStream getInfoStream() {
+    return infoStream;
+  }
+  public static void setInfoStream(PrintStream infoStream) {
+    SegmentMerger.infoStream = infoStream;
+  }
+  public static void message(String message) {
+    if (infoStream != null) {
+      infoStream.println(Thread.currentThread().getName() + ": " + message);
+    }
+  }
+
   /** Process postings from multiple segments all positioned on the
    *  same term.  Writes out merged entries into freqOutput and
    *  the proxOutput streams.
@@ -329,10 +353,31 @@
       int doc = postings.doc();
       if (docMap != null)
         doc = docMap[doc];                      // map around deletions
+      doc += base;                              // convert to merged space
 
-      if (doc < lastDoc)
-        throw new IllegalStateException("docs out of order");
+      if (lastDoc != 0 && doc <= lastDoc) {
+        String message = "docs out of order (" + doc + " <= " + lastDoc + " )\n";
+        message += " at " + (1+i) + " of " + n + " segments\n";
+        message += " df [so far] = " + df + "\n";
+        for(int k=0;k
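
Note (not part of the patch): the patch makes SegmentMerger's diagnostic output configurable via the new static setInfoStream/getInfoStream/message hooks, defaulting to System.err. Below is a minimal sketch of how those hooks might be driven from a small debugging harness. Since SegmentMerger is package-private, the sketch is assumed to live in org.apache.lucene.index; the index path, analyzer choice, and log file name are hypothetical, and the IndexWriter(String, Analyzer, boolean) constructor is the pre-2.2-style API of that era.

package org.apache.lucene.index;

import java.io.FileOutputStream;
import java.io.PrintStream;

import org.apache.lucene.analysis.standard.StandardAnalyzer;

// Hypothetical debugging harness: routes SegmentMerger.message(...) output
// (added by the patch above) to a log file instead of System.err.
public class CaptureMergeDiagnostics {

  public static void main(String[] args) throws Exception {
    // Autoflushing stream so diagnostics are visible even if the JVM dies mid-merge.
    PrintStream log = new PrintStream(new FileOutputStream("merge-debug.log"), true);
    SegmentMerger.setInfoStream(log);   // hook added by the patch

    // Any indexing that opens stored-fields readers or merges segments now logs
    // the "FieldsReader size" lines, and, if the conditions are hit, the
    // docCount-mismatch and "docs out of order" diagnostics.
    IndexWriter writer = new IndexWriter("/tmp/debug-index", new StandardAnalyzer(), true);
    // ... add documents, writer.optimize(), etc.
    writer.close();

    log.close();
  }
}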