Index: lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
--- lucene/src/java/org/apache/lucene/index/DocumentsWriter.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/java/org/apache/lucene/index/DocumentsWriter.java Mon Dec 13 07:05:59 2010 -0500
@@ -450,7 +450,7 @@
     assert docStoreSegment != null;

     if (infoStream != null) {
-      message("closeDocStore: files=" + openFiles + "; segment=" + docStoreSegment + "; docStoreOffset=" + docStoreOffset + "; numDocsInStore=" + numDocsInStore + "; isSeparate=" + isSeparate);
+      message("closeDocStore: openFiles=" + openFiles + "; segment=" + docStoreSegment + "; docStoreOffset=" + docStoreOffset + "; numDocsInStore=" + numDocsInStore + "; isSeparate=" + isSeparate);
     }

     closedFiles.clear();
@@ -720,17 +720,21 @@
                                                  docStoreSegment, numDocsInRAM, numDocsInStore, writer.getConfig().getTermIndexInterval(),
                                                  SegmentCodecs.build(fieldInfos, writer.codecs));

-      newSegment = new SegmentInfo(segment, numDocsInRAM, directory, false, -1, null, false, hasProx(), flushState.segmentCodecs);
+      newSegment = new SegmentInfo(segment, numDocsInRAM, directory, false, -1, null, false, hasProx(), flushState.segmentCodecs, false);

       if (!closeDocStore || docStoreOffset != 0) {
         newSegment.setDocStoreSegment(docStoreSegment);
         newSegment.setDocStoreOffset(docStoreOffset);
       }
+
+      boolean hasVectors = false;

       if (closeDocStore) {
         closeDocStore(flushState, writer, deleter, newSegment, mergePolicy, segmentInfos);
       }

+      hasVectors |= flushState.hasVectors;
+
       if (numDocsInRAM > 0) {

         assert nextDocID == numDocsInRAM;
@@ -749,6 +753,19 @@
         final long startNumBytesUsed = bytesUsed();
         consumer.flush(threads, flushState);

+        hasVectors |= flushState.hasVectors;
+
+        if (hasVectors) {
+          if (infoStream != null) {
+            message("new segment has vectors");
+          }
+          newSegment.setHasVectors(true);
+        } else {
+          if (infoStream != null) {
+            message("new segment has no vectors");
+          }
+        }
+
         if (infoStream != null) {
           message("flushedFiles=" + flushState.flushedFiles);
           message("flushed codecs=" + newSegment.getSegmentCodecs());
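Note: the DocumentsWriter change reads flushState.hasVectors at two points, once after closeDocStore() and again after consumer.flush(), because either step may be the one that actually writes the vector files. A minimal standalone sketch of that accumulate-then-record pattern; FlushState and SegmentRecord are hypothetical stand-ins for SegmentWriteState and SegmentInfo, not the Lucene classes.

// Hypothetical stand-ins; only the flag plumbing is the point.
class FlushState {
  boolean hasVectors;                // set by whichever stage wrote .tvx/.tvd/.tvf
}

class SegmentRecord {
  private boolean hasVectors;
  void setHasVectors(boolean v) { hasVectors = v; }
  boolean getHasVectors()       { return hasVectors; }
}

class FlushSketch {
  static SegmentRecord flush(FlushState state, boolean closeDocStore) {
    SegmentRecord seg = new SegmentRecord();
    boolean hasVectors = false;
    if (closeDocStore) {
      // closing shared doc stores can flush pending term vectors
      hasVectors |= state.hasVectors;
    }
    // ... the indexing chain flushes here ...
    hasVectors |= state.hasVectors;  // the chain itself may also have written vectors
    seg.setHasVectors(hasVectors);
    return seg;
  }

  public static void main(String[] args) {
    FlushState state = new FlushState();
    state.hasVectors = true;
    System.out.println("hasVectors=" + flush(state, true).getHasVectors()); // true
  }
}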
Index: lucene/src/java/org/apache/lucene/index/IndexWriter.java
--- lucene/src/java/org/apache/lucene/index/IndexWriter.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/java/org/apache/lucene/index/IndexWriter.java Mon Dec 13 07:05:59 2010 -0500
@@ -1047,8 +1047,10 @@

     mergePolicy.close();

-    finishMerges(waitForMerges);
-    stopMerges = true;
+    synchronized(this) {
+      finishMerges(waitForMerges);
+      stopMerges = true;
+    }

     mergeScheduler.close();

@@ -1251,10 +1253,12 @@
       synchronized (this) {
         // If docWriter has some aborted files that were
         // never incref'd, then we clean them up here
+        deleter.checkpoint(segmentInfos, false);
         if (docWriter != null) {
           final Collection<String> files = docWriter.abortedFiles();
-          if (files != null)
+          if (files != null) {
             deleter.deleteNewFiles(files);
+          }
         }
       }
     }
@@ -1879,7 +1883,14 @@
     }

     try {
-      finishMerges(false);
+      synchronized(this) {
+        finishMerges(false);
+        stopMerges = true;
+      }
+
+      if (infoStream != null) {
+        message("rollback: done finish merges");
+      }

       // Must pre-close these two, in case they increment
       // changeCount so that we can then set it to false
@@ -2230,7 +2241,8 @@
       int docCount = merger.merge();                // merge 'em

       SegmentInfo info = new SegmentInfo(mergedName, docCount, directory,
-                                         false, -1, null, false, merger.hasProx(), merger.getSegmentCodecs());
+                                         false, -1, null, false, merger.hasProx(), merger.getSegmentCodecs(),
+                                         merger.hasVectors());
       setDiagnostics(info, "addIndexes(IndexReader...)");

       boolean useCompoundFile;
@@ -2541,8 +2553,16 @@
         return false;
       } finally {
         flushControl.clearFlushPending();
-        if (!success && infoStream != null) {
-          message("hit exception during flush");
+        if (!success) {
+          if (infoStream != null) {
+            message("hit exception during flush");
+          }
+          if (docWriter != null) {
+            final Collection<String> files = docWriter.abortedFiles();
+            if (files != null) {
+              deleter.deleteNewFiles(files);
+            }
+          }
         }
       }
     }
@@ -2928,6 +2948,7 @@

     boolean mergeDocStores = false;
     boolean doFlushDocStore = false;
+    boolean hasVectors = false;
     final String currentDocStoreSegment = docWriter.getDocStoreSegment();

     // Test each segment to be merged: check if we need to
@@ -2939,6 +2960,10 @@
       if (si.hasDeletions())
         mergeDocStores = true;

+      if (si.getHasVectors()) {
+        hasVectors = true;
+      }
+
       // If it has its own (private) doc stores we must
       // merge the doc stores
       if (-1 == si.getDocStoreOffset())
@@ -3014,6 +3039,7 @@
       updatePendingMerges(1, false);
     }

+    merge.hasVectors = hasVectors;
     merge.mergeDocStores = mergeDocStores;

     // Bind a new segment name here so even with
@@ -3024,8 +3050,8 @@
                                  docStoreSegment,
                                  docStoreIsCompoundFile,
                                  false,
-                                 null);
-
+                                 null,
+                                 false);
     Map<String,String> details = new HashMap<String,String>();
     details.put("optimize", Boolean.toString(merge.optimize));
@@ -3033,6 +3059,10 @@
     details.put("mergeDocStores", Boolean.toString(mergeDocStores));
     setDiagnostics(merge.info, "merge", details);

+    if (infoStream != null) {
+      message("merge seg=" + merge.info.name + " mergeDocStores=" + mergeDocStores);
+    }
+
     // Also enroll the merged segment into mergingSegments;
     // this prevents it from getting selected for a merge
     // after our merge is done but while we are building the
@@ -3252,6 +3282,7 @@

     // Record which codec was used to write the segment
     merge.info.setSegmentCodecs(merger.getSegmentCodecs());
+    merge.info.setHasVectors(merger.hasVectors() || merge.hasVectors);

     if (infoStream != null) {
       message("merge segmentCodecs=" + merger.getSegmentCodecs());
@@ -3446,7 +3477,7 @@
           // are called, deleter should know about every
           // file referenced by the current head
           // segmentInfos:
-          assert deleter.exists(fileName) : "IndexFileDeleter doesn't know about file " + fileName;
+          assert deleter.exists(fileName): "IndexFileDeleter doesn't know about file " + fileName;
         }
         return true;
       }
Index: lucene/src/java/org/apache/lucene/index/MergePolicy.java
--- lucene/src/java/org/apache/lucene/index/MergePolicy.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/java/org/apache/lucene/index/MergePolicy.java Mon Dec 13 07:05:59 2010 -0500
@@ -68,6 +68,7 @@

     SegmentInfo info;               // used by IndexWriter
     boolean mergeDocStores;         // used by IndexWriter
+    boolean hasVectors;             // used by IndexWriter
     boolean optimize;               // used by IndexWriter
     boolean registerDone;           // used by IndexWriter
     long mergeGen;                  // used by IndexWriter
@@ -156,6 +157,9 @@
       if (mergeDocStores) {
         b.append(" [mergeDocStores]");
       }
+      if (aborted) {
+        b.append(" [ABORTED]");
+      }
       return b.toString();
     }
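Note: both close() and rollback() now set stopMerges inside the same synchronized block that runs finishMerges(), closing the window in which a concurrent merge registration could slip in between the two steps. A sketch of the locking invariant; Writer is a hypothetical stand-in for IndexWriter.

// Only the lock discipline is the point here.
class Writer {
  private boolean stopMerges;

  synchronized void registerMerge(Runnable merge) {
    if (stopMerges) {
      throw new IllegalStateException("this writer is closing; merge is aborted");
    }
    // ... enroll the merge ...
  }

  void shutdown(boolean waitForMerges) {
    synchronized (this) {
      // Same monitor as registerMerge: no merge can be enrolled after
      // finishMerges() has run but before stopMerges becomes visible.
      finishMerges(waitForMerges);
      stopMerges = true;
    }
  }

  private void finishMerges(boolean wait) {
    // wait for (or abort) merges that are already running
  }
}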
Index: lucene/src/java/org/apache/lucene/index/SegmentInfo.java
--- lucene/src/java/org/apache/lucene/index/SegmentInfo.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/java/org/apache/lucene/index/SegmentInfo.java Mon Dec 13 07:05:59 2010 -0500
@@ -79,13 +79,16 @@

   private int delCount;                    // How many deleted docs in this segment

   private boolean hasProx;                 // True if this segment has any fields with omitTermFreqAndPositions==false
-
+
+  private byte hasVectors;                 // 0 if no; 1 if yes; 2 if must-check-filesystem (old index)
+
   private SegmentCodecs segmentCodecs;

   private Map<String,String> diagnostics;

   public SegmentInfo(String name, int docCount, Directory dir, boolean isCompoundFile, int docStoreOffset,
-                     String docStoreSegment, boolean docStoreIsCompoundFile, boolean hasProx, SegmentCodecs segmentCodecs) {
+                     String docStoreSegment, boolean docStoreIsCompoundFile, boolean hasProx, SegmentCodecs segmentCodecs,
+                     boolean hasVectors) {
     this.name = name;
     this.docCount = docCount;
     this.dir = dir;
@@ -96,6 +99,7 @@
     this.docStoreIsCompoundFile = docStoreIsCompoundFile;
     this.hasProx = hasProx;
     this.segmentCodecs = segmentCodecs;
+    this.hasVectors = (byte) (hasVectors ? 1 : 0);
     delCount = 0;
     assert docStoreOffset == -1 || docStoreSegment != null: "dso=" + docStoreOffset + " dss=" + docStoreSegment + " docCount=" + docCount;
   }
@@ -111,6 +115,7 @@
     delGen = src.delGen;
     docStoreOffset = src.docStoreOffset;
     docStoreIsCompoundFile = src.docStoreIsCompoundFile;
+    hasVectors = src.hasVectors;
     if (src.normGen == null) {
       normGen = null;
     } else {
@@ -184,6 +189,13 @@
       segmentCodecs.codecs = new Codec[] { codecs.lookup("PreFlex")};
     }
     diagnostics = input.readStringStringMap();
+
+    if (format <= DefaultSegmentInfosWriter.FORMAT_HAS_VECTORS) {
+      hasVectors = input.readByte();
+    } else {
+      hasVectors = 2;
+    }
   }

   /** Returns total size in bytes of all of files used by
@@ -204,6 +216,27 @@
     return sizeInBytes;
   }

+  public boolean getHasVectors() throws IOException {
+    if (hasVectors == 1) {
+      return true;
+    } else if (hasVectors == 0) {
+      return false;
+    } else {
+      final String storesSegment;
+      if (getDocStoreOffset() != -1) {
+        storesSegment = getDocStoreSegment();
+      } else {
+        storesSegment = name;
+      }
+      return dir.fileExists(IndexFileNames.segmentFileName(storesSegment, "", IndexFileNames.VECTORS_INDEX_EXTENSION));
+    }
+  }
+
+  public void setHasVectors(boolean v) {
+    hasVectors = (byte) (v ? 1 : 0);
+    clearFiles();
+  }
+
   public boolean hasDeletions() {
     // Cases:
     //
@@ -229,18 +262,14 @@

   @Override
   public Object clone() {
-    SegmentInfo si = new SegmentInfo(name, docCount, dir, isCompoundFile, docStoreOffset, docStoreSegment, docStoreIsCompoundFile, hasProx, segmentCodecs);
-    si.isCompoundFile = isCompoundFile;
+    SegmentInfo si = new SegmentInfo(name, docCount, dir, isCompoundFile, docStoreOffset, docStoreSegment, docStoreIsCompoundFile, hasProx, segmentCodecs, false);
     si.delGen = delGen;
     si.delCount = delCount;
-    si.hasProx = hasProx;
     si.diagnostics = new HashMap<String,String>(diagnostics);
     if (normGen != null) {
       si.normGen = normGen.clone();
     }
-    si.docStoreOffset = docStoreOffset;
-    si.docStoreSegment = docStoreSegment;
-    si.docStoreIsCompoundFile = docStoreIsCompoundFile;
+    si.hasVectors = hasVectors;
     return si;
   }
@@ -404,6 +433,7 @@
     output.writeByte((byte) (hasProx ? 1:0));
     segmentCodecs.write(output);
     output.writeStringStringMap(diagnostics);
+    output.writeByte(hasVectors);
   }

   void setHasProx(boolean hasProx) {
@@ -466,12 +496,30 @@
       if (docStoreIsCompoundFile) {
         fileSet.add(IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.COMPOUND_FILE_STORE_EXTENSION));
       } else {
-        for (String ext : IndexFileNames.STORE_INDEX_EXTENSIONS)
-          addIfExists(fileSet, IndexFileNames.segmentFileName(docStoreSegment, "", ext));
+        fileSet.add(IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.FIELDS_INDEX_EXTENSION));
+        fileSet.add(IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.FIELDS_EXTENSION));
+        if (hasVectors == 1) {
+          fileSet.add(IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_INDEX_EXTENSION));
+          fileSet.add(IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
+          fileSet.add(IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_FIELDS_EXTENSION));
+        } else if (hasVectors == 2) {
+          addIfExists(fileSet, IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_INDEX_EXTENSION));
+          addIfExists(fileSet, IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
+          addIfExists(fileSet, IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_FIELDS_EXTENSION));
+        }
       }
     } else if (!useCompoundFile) {
-      for (String ext : IndexFileNames.STORE_INDEX_EXTENSIONS)
-        addIfExists(fileSet, IndexFileNames.segmentFileName(name, "", ext));
+      fileSet.add(IndexFileNames.segmentFileName(name, "", IndexFileNames.FIELDS_INDEX_EXTENSION));
+      fileSet.add(IndexFileNames.segmentFileName(name, "", IndexFileNames.FIELDS_EXTENSION));
+      if (hasVectors == 1) {
+        fileSet.add(IndexFileNames.segmentFileName(name, "", IndexFileNames.VECTORS_INDEX_EXTENSION));
+        fileSet.add(IndexFileNames.segmentFileName(name, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
+        fileSet.add(IndexFileNames.segmentFileName(name, "", IndexFileNames.VECTORS_FIELDS_EXTENSION));
+      } else if (hasVectors == 2) {
+        addIfExists(fileSet, IndexFileNames.segmentFileName(name, "", IndexFileNames.VECTORS_INDEX_EXTENSION));
+        addIfExists(fileSet, IndexFileNames.segmentFileName(name, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION));
+        addIfExists(fileSet, IndexFileNames.segmentFileName(name, "", IndexFileNames.VECTORS_FIELDS_EXTENSION));
+      }
     }

     String delFileName = IndexFileNames.fileNameFromGeneration(name, IndexFileNames.DELETES_EXTENSION, delGen);
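Note: hasVectors is a byte rather than a boolean so that segments written before FORMAT_HAS_VECTORS can still answer: 0 and 1 are authoritative, while 2 means the flag was never recorded and the directory must be probed for the vectors index file, exactly as getHasVectors() above does. A standalone sketch of that tri-state; the FileChecker interface is a hypothetical stand-in for Directory.fileExists.

class TriStateFlag {
  static final byte NO = 0;
  static final byte YES = 1;
  static final byte UNKNOWN = 2;   // segment predates the flag; must check the filesystem

  interface FileChecker {
    boolean exists(String fileName);
  }

  private final byte state;
  TriStateFlag(byte state) { this.state = state; }

  boolean get(FileChecker dir, String vectorsIndexFileName) {
    switch (state) {
      case YES: return true;
      case NO:  return false;
      default:
        // Old-format segment: the only source of truth is whether the
        // vectors index file actually exists on disk.
        return dir.exists(vectorsIndexFileName);
    }
  }

  public static void main(String[] args) {
    FileChecker dir = new FileChecker() {
      public boolean exists(String fileName) { return fileName.endsWith(".tvx"); }
    };
    System.out.println(new TriStateFlag(UNKNOWN).get(dir, "_1.tvx")); // true
  }
}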
Index: lucene/src/java/org/apache/lucene/index/SegmentMerger.java
--- lucene/src/java/org/apache/lucene/index/SegmentMerger.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/java/org/apache/lucene/index/SegmentMerger.java Mon Dec 13 07:05:59 2010 -0500
@@ -75,6 +75,7 @@
   private final CodecProvider codecs;
   private Codec codec;
   private SegmentWriteState segmentWriteState;
+  private boolean hasVectors;

   private PayloadProcessorProvider payloadProcessorProvider;

@@ -100,6 +101,10 @@
     return fieldInfos.hasProx();
   }

+  boolean hasVectors() {
+    return hasVectors;
+  }
+
   /**
    * Add an IndexReader to the collection of readers that are to be merged
    * @param reader
@@ -427,7 +432,7 @@

   private final void mergeVectors() throws IOException {
     TermVectorsWriter termVectorsWriter = new TermVectorsWriter(directory, segment, fieldInfos);
-
+    hasVectors = true;
     try {
       int idx = 0;
       for (final IndexReader reader : readers) {
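Note: the IndexWriter hunk sets merge.info.setHasVectors(merger.hasVectors() || merge.hasVectors), and it needs both operands: merger.hasVectors() only turns true when mergeVectors() actually ran, which is skipped when doc stores are shared rather than merged, so the flag gathered from the input segments has to be ORed in. A one-method sketch of that decision; the names are hypothetical.

class MergeVectorsSketch {
  // mergerWroteVectors: the merger itself produced .tvx/.tvd/.tvf files
  // anyInputHadVectors: some segment being merged already carried vectors
  static boolean mergedSegmentHasVectors(boolean mergerWroteVectors,
                                         boolean anyInputHadVectors) {
    // When doc stores are shared (mergeDocStores == false) the merger never
    // writes vector files itself, yet the shared store may contain vectors.
    return mergerWroteVectors || anyInputHadVectors;
  }

  public static void main(String[] args) {
    // Shared doc stores: merger wrote nothing, but an input had vectors.
    System.out.println(mergedSegmentHasVectors(false, true)); // true
  }
}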
Index: lucene/src/java/org/apache/lucene/index/SegmentReader.java
--- lucene/src/java/org/apache/lucene/index/SegmentReader.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/java/org/apache/lucene/index/SegmentReader.java Mon Dec 13 07:05:59 2010 -0500
@@ -243,7 +243,7 @@
           throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + fieldsReaderOrig.size() + " but segmentInfo shows " + si.docCount);
         }

-        if (fieldInfos.hasVectors()) { // open term vector files only as needed
+        if (si.getHasVectors()) { // open term vector files only as needed
           termVectorsReaderOrig = new TermVectorsReader(storeDir, storesSegment, fieldInfos, readBufferSize, si.getDocStoreOffset(), si.docCount);
         }
       }
Index: lucene/src/java/org/apache/lucene/index/SegmentWriteState.java
--- lucene/src/java/org/apache/lucene/index/SegmentWriteState.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/java/org/apache/lucene/index/SegmentWriteState.java Mon Dec 13 07:05:59 2010 -0500
@@ -34,6 +34,7 @@
   public final String docStoreSegmentName;
   public final int numDocs;
   public int numDocsInStore;
+  public boolean hasVectors;
   public final Collection<String> flushedFiles;

   final SegmentCodecs segmentCodecs;
Index: lucene/src/java/org/apache/lucene/index/TermVectorsReader.java
--- lucene/src/java/org/apache/lucene/index/TermVectorsReader.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/java/org/apache/lucene/index/TermVectorsReader.java Mon Dec 13 07:05:59 2010 -0500
@@ -77,38 +77,30 @@
     try {
       String idxName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.VECTORS_INDEX_EXTENSION);
-      if (d.fileExists(idxName)) {
-        tvx = d.openInput(idxName, readBufferSize);
-        format = checkValidFormat(tvx, idxName);
-        String fn = IndexFileNames.segmentFileName(segment, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
-        tvd = d.openInput(fn, readBufferSize);
-        final int tvdFormat = checkValidFormat(tvd, fn);
-        fn = IndexFileNames.segmentFileName(segment, "", IndexFileNames.VECTORS_FIELDS_EXTENSION);
-        tvf = d.openInput(fn, readBufferSize);
-        final int tvfFormat = checkValidFormat(tvf, fn);
+      tvx = d.openInput(idxName, readBufferSize);
+      format = checkValidFormat(tvx, idxName);
+      String fn = IndexFileNames.segmentFileName(segment, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
+      tvd = d.openInput(fn, readBufferSize);
+      final int tvdFormat = checkValidFormat(tvd, fn);
+      fn = IndexFileNames.segmentFileName(segment, "", IndexFileNames.VECTORS_FIELDS_EXTENSION);
+      tvf = d.openInput(fn, readBufferSize);
+      final int tvfFormat = checkValidFormat(tvf, fn);

-        assert format == tvdFormat;
-        assert format == tvfFormat;
+      assert format == tvdFormat;
+      assert format == tvfFormat;

-        numTotalDocs = (int) (tvx.length() >> 4);
+      numTotalDocs = (int) (tvx.length() >> 4);

-        if (-1 == docStoreOffset) {
-          this.docStoreOffset = 0;
-          this.size = numTotalDocs;
-          assert size == 0 || numTotalDocs == size;
-        } else {
-          this.docStoreOffset = docStoreOffset;
-          this.size = size;
-          // Verify the file is long enough to hold all of our
-          // docs
-          assert numTotalDocs >= size + docStoreOffset: "numTotalDocs=" + numTotalDocs + " size=" + size + " docStoreOffset=" + docStoreOffset;
-        }
+      if (-1 == docStoreOffset) {
+        this.docStoreOffset = 0;
+        this.size = numTotalDocs;
+        assert size == 0 || numTotalDocs == size;
       } else {
-        // If all documents flushed in a segment had hit
-        // non-aborting exceptions, it's possible that
-        // FieldInfos.hasVectors returns true yet the term
-        // vector files don't exist.
-        format = 0;
+        this.docStoreOffset = docStoreOffset;
+        this.size = size;
+        // Verify the file is long enough to hold all of our
+        // docs
+        assert numTotalDocs >= size + docStoreOffset: "numTotalDocs=" + numTotalDocs + " size=" + size + " docStoreOffset=" + docStoreOffset;
       }

       this.fieldInfos = fieldInfos;
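Note: TermVectorsReader can drop its d.fileExists(idxName) guard because SegmentReader now only constructs it when si.getHasVectors() is true, so a missing file becomes a hard error instead of a silently empty reader. Sketched as a precondition shift; Opener and its Dir interface are hypothetical, not Lucene APIs.

import java.io.IOException;

class Opener {
  interface Dir {
    boolean fileExists(String name);
    Object openInput(String name) throws IOException;
  }

  // Before: tolerate absence, return null, leave format == 0.
  static Object openLenient(Dir d, String idxName) throws IOException {
    return d.fileExists(idxName) ? d.openInput(idxName) : null;
  }

  // After: the caller consulted SegmentInfo first, so absence is corruption
  // and the exception from openInput should propagate.
  static Object openStrict(Dir d, String idxName) throws IOException {
    return d.openInput(idxName);
  }
}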
Index: lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
--- lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java Mon Dec 13 07:05:59 2010 -0500
@@ -37,6 +37,7 @@
   IndexOutput tvd;
   IndexOutput tvf;
   int lastDocID;
+  boolean hasVectors;

   public TermVectorsTermsWriter(DocumentsWriter docWriter) {
     this.docWriter = docWriter;
@@ -57,6 +58,7 @@
     // because, although FieldInfos.hasVectors() will return
     // true, the TermVectorsReader gracefully handles
     // non-existence of the term vectors files.
+    state.hasVectors = hasVectors;

     if (tvx != null) {

@@ -108,6 +110,8 @@
       docWriter.removeOpenFile(docName);

       lastDocID = 0;
+      state.hasVectors = hasVectors;
+      hasVectors = false;
     }
   }

@@ -146,7 +150,7 @@

   synchronized void initTermVectorsWriter() throws IOException {
     if (tvx == null) {
-
+
       final String docStoreSegment = docWriter.getDocStoreSegment();

       if (docStoreSegment == null)
@@ -159,6 +163,7 @@
       String idxName = IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_INDEX_EXTENSION);
       String docName = IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
       String fldName = IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_FIELDS_EXTENSION);
+      hasVectors = true;
       tvx = docWriter.directory.createOutput(idxName);
       tvd = docWriter.directory.createOutput(docName);
       tvf = docWriter.directory.createOutput(fldName);
@@ -218,6 +223,7 @@

   @Override
   public void abort() {
+    hasVectors = false;
     if (tvx != null) {
       try {
         tvx.close();
Index: lucene/src/java/org/apache/lucene/index/codecs/DefaultSegmentInfosWriter.java
--- lucene/src/java/org/apache/lucene/index/codecs/DefaultSegmentInfosWriter.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/java/org/apache/lucene/index/codecs/DefaultSegmentInfosWriter.java Mon Dec 13 07:05:59 2010 -0500
@@ -39,9 +39,12 @@
    * in the new flex format */
   public static final int FORMAT_4_0 = -10;

+  /** Each segment records whether it has term vectors */
+  public static final int FORMAT_HAS_VECTORS = -11;
+
   /** This must always point to the most recent file format.
    * whenever you add a new format, make it 1 smaller (negative version logic)! */
-  public static final int FORMAT_CURRENT = FORMAT_4_0;
+  public static final int FORMAT_CURRENT = FORMAT_HAS_VECTORS;

   /** This must always point to the first supported file format. */
   public static final int FORMAT_MINIMUM = FORMAT_DIAGNOSTICS;
Index: lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
--- lucene/src/test/org/apache/lucene/index/TestAddIndexes.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/test/org/apache/lucene/index/TestAddIndexes.java Mon Dec 13 07:05:59 2010 -0500
@@ -39,7 +39,6 @@
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;

 public class TestAddIndexes extends LuceneTestCase {
@@ -642,9 +641,11 @@
       addDoc(writer);
       writer.close();

-      dir2 = new MockDirectoryWrapper(random, new RAMDirectory());
+      dir2 = newDirectory();
       writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+      writer2.setInfoStream(VERBOSE ? System.out : null);
       writer2.commit();
+
       readers = new IndexReader[NUM_COPY];
       for(int i=0;i
 filesToDelete = merger.createCompoundFile(merged + ".cfs", info);
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
--- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriter.java Mon Dec 13 07:05:59 2010 -0500
@@ -881,10 +881,14 @@
   public void testEmptyDocAfterFlushingRealDoc() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+    writer.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
     writer.commit();
+    if (VERBOSE) {
+      System.out.println("\nTEST: now add empty doc");
+    }
     writer.addDocument(new Document());
     writer.close();
     _TestUtil.checkIndex(dir);
@@ -1027,7 +1031,11 @@
     Directory dir = newDirectory();
     int delID = 0;
     for(int i=0;i<20;i++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + i);
+      }
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
+      writer.setInfoStream(VERBOSE ? System.out : null);
       LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
       lmp.setMergeFactor(2);
       lmp.setUseCompoundFile(false);
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java Mon Dec 13 07:05:59 2010 -0500
@@ -51,7 +51,7 @@
         System.out.println("TEST: pass=" + pass);
       }
       boolean doAbort = pass == 1;
-      long diskFree = 200;
+      long diskFree = _TestUtil.nextInt(random, 100, 300);
       while(true) {
         if (VERBOSE) {
           System.out.println("TEST: cycle: diskFree=" + diskFree);
@@ -120,7 +120,7 @@
           dir.close();

           // Now try again w/ more space:
-          diskFree += 500;
+          diskFree += _TestUtil.nextInt(random, 400, 600);
         } else {
           //_TestUtil.syncConcurrentMerges(writer);
           dir.setMaxSizeInBytes(0);
Index: lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java
--- lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java Mon Dec 13 07:05:59 2010 -0500
@@ -80,7 +80,8 @@
     assertTrue(docsMerged == 2);
     //Should be able to open a new SegmentReader against the new directory
     SegmentReader mergedReader = SegmentReader.get(false, mergedDir, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, -1,
-        null, false, merger.hasProx(), merger.getSegmentCodecs()), BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+        null, false, merger.hasProx(), merger.getSegmentCodecs(), merger.hasVectors()),
+        BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
     assertTrue(mergedReader != null);
     assertTrue(mergedReader.numDocs() == 2);
Index: lucene/src/test/org/apache/lucene/store/MockIndexOutputWrapper.java
--- lucene/src/test/org/apache/lucene/store/MockIndexOutputWrapper.java Sun Dec 12 18:05:00 2010 -0500
+++ lucene/src/test/org/apache/lucene/store/MockIndexOutputWrapper.java Mon Dec 13 07:05:59 2010 -0500
@@ -106,6 +106,7 @@
       message += ")";
       if (LuceneTestCase.VERBOSE) {
         System.out.println(Thread.currentThread().getName() + ": MDW: now throw fake disk full");
+        new Throwable().printStackTrace(System.out);
       }
       throw new IOException(message);
     } else {
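Note: the disk-full test now draws its thresholds from a range instead of fixed constants, so different random seeds probe different failure points while each run stays reproducible from its seed. A sketch of the helper's contract; nextInt here only mimics _TestUtil.nextInt, which is inclusive on both ends.

import java.util.Random;

class DiskFullSketch {
  // Inclusive-bounds random int, like _TestUtil.nextInt(random, lo, hi).
  static int nextInt(Random r, int lo, int hi) {
    return lo + r.nextInt(hi - lo + 1);
  }

  public static void main(String[] args) {
    Random random = new Random(42);            // seeded: failures are replayable
    long diskFree = nextInt(random, 100, 300); // initial budget, as in the test
    System.out.println("start diskFree=" + diskFree);
    diskFree += nextInt(random, 400, 600);     // retry with more space
    System.out.println("retry diskFree=" + diskFree);
  }
}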