Index: src/test/org/apache/lucene/index/TestDoc.java =================================================================== --- src/test/org/apache/lucene/index/TestDoc.java (revision 966819) +++ src/test/org/apache/lucene/index/TestDoc.java (working copy) @@ -194,7 +194,7 @@ merger.closeReaders(); final SegmentInfo info = new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir, - useCompoundFile, -1, null, false, merger.hasProx(), merger.getCodec()); + useCompoundFile, merger.hasProx(), merger.getCodec()); if (useCompoundFile) { List filesToDelete = merger.createCompoundFile(merged + ".cfs", info); Index: src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- src/test/org/apache/lucene/index/TestIndexWriter.java (revision 966819) +++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -1437,7 +1437,7 @@ doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); for(int i=0;i<19;i++) writer.addDocument(doc); - writer.flush(false, true, true); + writer.flush(false, true); writer.close(); SegmentInfos sis = new SegmentInfos(); sis.read(dir); Index: src/test/org/apache/lucene/index/TestCodecs.java =================================================================== --- src/test/org/apache/lucene/index/TestCodecs.java (revision 966818) +++ src/test/org/apache/lucene/index/TestCodecs.java (working copy) @@ -281,7 +281,7 @@ final Directory dir = new MockRAMDirectory(); this.write(fieldInfos, dir, fields); - final SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, false, -1, SEGMENT, false, true, CodecProvider.getDefault().getWriter(null)); + final SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, false, true, CodecProvider.getDefault().getWriter(null)); si.setHasProx(false); final FieldsProducer reader = si.getCodec().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, 64, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR)); @@ -319,7 +319,7 @@ final Directory dir = new MockRAMDirectory(); this.write(fieldInfos, dir, fields); - final SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, false, -1, SEGMENT, false, true, CodecProvider.getDefault().getWriter(null)); + final SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, false, true, CodecProvider.getDefault().getWriter(null)); final FieldsProducer terms = si.getCodec().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, 1024, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR)); @@ -602,7 +602,7 @@ final int termIndexInterval = this.nextInt(13, 27); - final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, null, 10000, 10000, termIndexInterval, + final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, 10000, termIndexInterval, CodecProvider.getDefault()); final FieldsConsumer consumer = state.codec.fieldsConsumer(state); Index: src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java =================================================================== --- src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (revision 966818) +++ src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (working copy) @@ -84,7 +84,7 @@ writer.addDocument(doc); failure.setDoFail(); try { - writer.flush(true, false, true); + writer.flush(true, true); if (failure.hitExc) { fail("failed to hit IOException"); } Index: src/test/org/apache/lucene/index/TestIndexWriterReader.java 
=================================================================== --- src/test/org/apache/lucene/index/TestIndexWriterReader.java (revision 966819) +++ src/test/org/apache/lucene/index/TestIndexWriterReader.java (working copy) @@ -146,7 +146,7 @@ writer.setInfoStream(infoStream); // create the index createIndexNoClose(!optimize, "index1", writer); - writer.flush(false, true, true); + writer.flush(false, true); // create a 2nd index Directory dir2 = new MockRAMDirectory(); @@ -224,7 +224,7 @@ writer.setInfoStream(infoStream); // create the index createIndexNoClose(!optimize, "index1", writer); - writer.flush(false, true, true); + writer.flush(false, true); // get a reader IndexReader r1 = writer.getReader(); @@ -426,7 +426,7 @@ IndexReader r1 = writer.getReader(); assertEquals(0, r1.maxDoc()); createIndexNoClose(false, "index1", writer); - writer.flush(!optimize, true, true); + writer.flush(!optimize, true); IndexReader iwr1 = writer.getReader(); assertEquals(100, iwr1.maxDoc()); @@ -438,7 +438,7 @@ Document d = createDocument(x, "index1", 5); writer.addDocument(d); } - writer.flush(false, true, true); + writer.flush(false, true); // verify the reader was reopened internally IndexReader iwr2 = writer.getReader(); assertTrue(iwr2 != r1); Index: src/test/org/apache/lucene/index/TestSegmentMerger.java =================================================================== --- src/test/org/apache/lucene/index/TestSegmentMerger.java (revision 966819) +++ src/test/org/apache/lucene/index/TestSegmentMerger.java (working copy) @@ -73,8 +73,8 @@ merger.closeReaders(); assertTrue(docsMerged == 2); //Should be able to open a new SegmentReader against the new directory - SegmentReader mergedReader = SegmentReader.get(false, mergedDir, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, -1, - null, false, merger.hasProx(), merger.getCodec()), BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, null); + SegmentReader mergedReader = SegmentReader.get(false, mergedDir, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, + merger.hasProx(), merger.getCodec()), BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, null); assertTrue(mergedReader != null); assertTrue(mergedReader.numDocs() == 2); Index: src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java =================================================================== --- src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java (revision 966819) +++ src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java (working copy) @@ -134,7 +134,7 @@ dir.createOutput(segName + ".frq").close(); // !!hack alert!! 
stuffing uniqueTermCount in as docCount - return new SegmentInfo(segName, uniqueTermCount, dir, false, -1, null, false, true, codec); + return new SegmentInfo(segName, uniqueTermCount, dir, false, true, codec); } private String toHexString(Term t) { Index: src/java/org/apache/lucene/index/DocumentsWriterThreadPool.java =================================================================== --- src/java/org/apache/lucene/index/DocumentsWriterThreadPool.java (revision 966818) +++ src/java/org/apache/lucene/index/DocumentsWriterThreadPool.java (working copy) @@ -121,8 +121,8 @@ throw new ThreadInterruptedException(ie); } + pauseAllThreads(); globalLock = true; - pauseAllThreads(); } finally { lock.unlock(); } Index: src/java/org/apache/lucene/index/DocumentsWriterPerThread.java =================================================================== --- src/java/org/apache/lucene/index/DocumentsWriterPerThread.java (revision 966818) +++ src/java/org/apache/lucene/index/DocumentsWriterPerThread.java (working copy) @@ -2,15 +2,12 @@ import java.io.IOException; import java.io.PrintStream; -import java.util.ArrayList; -import java.util.List; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.codecs.Codec; import org.apache.lucene.search.Similarity; import org.apache.lucene.store.Directory; -import org.apache.lucene.store.RAMFile; import org.apache.lucene.util.ArrayUtil; public class DocumentsWriterPerThread { @@ -94,10 +91,6 @@ } catch (Throwable t) { } - docStoreSegment = null; - numDocsInStore = 0; - docStoreOffset = 0; - // Reset all postings data doAfterFlush(); @@ -121,19 +114,15 @@ private DocFieldProcessor docFieldProcessor; String segment; // Current segment we are working on - private String docStoreSegment; // Current doc-store segment we are writing - private int docStoreOffset; // Current starting doc-store offset of current segment boolean aborting; // True if an abort is pending private final PrintStream infoStream; private int numDocsInRAM; - private int numDocsInStore; private int flushedDocCount; SegmentWriteState flushState; long[] sequenceIDs = new long[8]; - final List closedFiles = new ArrayList(); long numBytesUsed; @@ -161,13 +150,15 @@ docState.doc = doc; docState.analyzer = analyzer; docState.docID = numDocsInRAM; - initSegmentName(false); + if (segment == null) { + // this call is synchronized on IndexWriter.segmentInfos + segment = writer.newSegmentName(); + assert numDocsInRAM == 0; + } - final DocWriter perDoc; - boolean success = false; try { - perDoc = consumer.processDocument(); + consumer.processDocument(); success = true; } finally { @@ -181,9 +172,7 @@ success = false; try { - if (perDoc != null) { - perDoc.finish(); - } + consumer.finishDocument(); success = true; } finally { @@ -201,7 +190,6 @@ sequenceIDs[numDocsInRAM] = sequenceID; numDocsInRAM++; - numDocsInStore++; } int getNumDocsInRAM() { @@ -226,26 +214,6 @@ return flushState.codec; } - void initSegmentName(boolean onlyDocStore) { - if (segment == null && (!onlyDocStore || docStoreSegment == null)) { - // this call is synchronized on IndexWriter.segmentInfos - segment = writer.newSegmentName(); - assert numDocsInRAM == 0; - } - if (docStoreSegment == null) { - docStoreSegment = segment; - assert numDocsInStore == 0; - } - } - - - private void initFlushState(boolean onlyDocStore) { - initSegmentName(onlyDocStore); - flushState = new SegmentWriteState(infoStream, directory, segment, docFieldProcessor.fieldInfos, - docStoreSegment, numDocsInRAM, 
numDocsInStore, writer.getConfig().getTermIndexInterval(), - writer.codecs); - } - /** Reset after a flush */ private void doAfterFlush() throws IOException { segment = null; @@ -253,13 +221,13 @@ } /** Flush all pending docs to a new segment */ - SegmentInfo flush(boolean closeDocStore) throws IOException { + SegmentInfo flush() throws IOException { assert numDocsInRAM > 0; - initFlushState(closeDocStore); + flushState = new SegmentWriteState(infoStream, directory, segment, docFieldProcessor.fieldInfos, + numDocsInRAM, writer.getConfig().getTermIndexInterval(), + writer.codecs); - docStoreOffset = numDocsInStore; - if (infoStream != null) { message("flush postings as segment " + flushState.segmentName + " numDocs=" + numDocsInRAM); } @@ -267,22 +235,15 @@ boolean success = false; try { - - if (closeDocStore) { - assert flushState.docStoreSegmentName != null; - assert flushState.docStoreSegmentName.equals(flushState.segmentName); - closeDocStore(); - flushState.numDocsInStore = 0; - } - consumer.flush(flushState); + consumer.closeDocStore(flushState); + + if (infoStream != null) { SegmentInfo si = new SegmentInfo(flushState.segmentName, flushState.numDocs, directory, false, - docStoreOffset, flushState.docStoreSegmentName, - false, hasProx(), getCodec()); @@ -305,8 +266,6 @@ SegmentInfo newSegment = new SegmentInfo(flushState.segmentName, flushState.numDocs, directory, false, - docStoreOffset, flushState.docStoreSegmentName, - false, hasProx(), getCodec()); @@ -325,62 +284,11 @@ } } - /** Closes the current open doc stores an returns the doc - * store segment name. This returns null if there are * - * no buffered documents. */ - String closeDocStore() throws IOException { - - // nocommit -// if (infoStream != null) -// message("closeDocStore: " + openFiles.size() + " files to flush to segment " + docStoreSegment + " numDocs=" + numDocsInStore); - - boolean success = false; - - try { - initFlushState(true); - closedFiles.clear(); - - consumer.closeDocStore(flushState); - // nocommit - //assert 0 == openFiles.size(); - - String s = docStoreSegment; - docStoreSegment = null; - docStoreOffset = 0; - numDocsInStore = 0; - success = true; - return s; - } finally { - if (!success) { - parent.abort(); - } - } - } - - /** Get current segment name we are writing. */ String getSegment() { return segment; } - /** Returns the current doc store segment we are writing - * to. */ - String getDocStoreSegment() { - return docStoreSegment; - } - - /** Returns the doc offset into the shared doc store for - * the current buffered docs. */ - int getDocStoreOffset() { - return docStoreOffset; - } - - - @SuppressWarnings("unchecked") - List closedFiles() { - return (List) ((ArrayList) closedFiles).clone(); - } - void addOpenFile(String name) { synchronized(parent.openFiles) { assert !parent.openFiles.contains(name); @@ -393,61 +301,8 @@ assert parent.openFiles.contains(name); parent.openFiles.remove(name); } - closedFiles.add(name); } - /** Consumer returns this on each doc. This holds any - * state that must be flushed synchronized "in docID - * order". We gather these and flush them in order. */ - abstract static class DocWriter { - DocWriter next; - int docID; - abstract void finish() throws IOException; - abstract void abort(); - abstract long sizeInBytes(); - - void setNext(DocWriter next) { - this.next = next; - } - } - - /** - * Create and return a new DocWriterBuffer. - */ - PerDocBuffer newPerDocBuffer() { - return new PerDocBuffer(); - } - - /** - * RAMFile buffer for DocWriters. 
- */ - class PerDocBuffer extends RAMFile { - - /** - * Allocate bytes used from shared pool. - */ - protected byte[] newBuffer(int size) { - assert size == DocumentsWriterRAMAllocator.PER_DOC_BLOCK_SIZE; - return ramAllocator.perDocAllocator.getByteBlock(); - } - - /** - * Recycle the bytes used. - */ - synchronized void recycle() { - if (buffers.size() > 0) { - setLength(0); - - // Recycle the blocks - ramAllocator.perDocAllocator.recycleByteBlocks(buffers); - buffers.clear(); - sizeInBytes = 0; - - assert numBuffers() == 0; - } - } - } - void bytesUsed(long numBytes) { ramAllocator.bytesUsed(numBytes); } Index: src/java/org/apache/lucene/index/TermsHash.java =================================================================== --- src/java/org/apache/lucene/index/TermsHash.java (revision 966818) +++ src/java/org/apache/lucene/index/TermsHash.java (working copy) @@ -21,7 +21,6 @@ import java.util.HashMap; import java.util.Map; -import org.apache.lucene.index.DocumentsWriterPerThread.DocWriter; import org.apache.lucene.util.BytesRef; /** This class implements {@link InvertedDocConsumer}, which @@ -144,21 +143,14 @@ } @Override - DocWriter finishDocument() throws IOException { - final DocumentsWriterPerThread.DocWriter doc = consumer.finishDocument(); - - final DocumentsWriterPerThread.DocWriter doc2; - if (nextTermsHash != null) { - doc2 = nextTermsHash.consumer.finishDocument(); - } else { - doc2 = null; + void finishDocument() throws IOException { + try { + consumer.finishDocument(); + } finally { + if (nextTermsHash != null) { + nextTermsHash.consumer.finishDocument(); + } } - if (doc == null) { - return doc2; - } else { - doc.setNext(doc2); - return doc; - } } @Override Index: src/java/org/apache/lucene/index/SegmentMerger.java =================================================================== --- src/java/org/apache/lucene/index/SegmentMerger.java (revision 966818) +++ src/java/org/apache/lucene/index/SegmentMerger.java (working copy) @@ -65,12 +65,6 @@ private final CheckAbort checkAbort; - // Whether we should merge doc stores (stored fields and - // vectors files). When all segments we are merging - // already share the same doc store files, we don't need - // to merge the doc stores. - private boolean mergeDocStores; - /** Maximum number of contiguous documents to bulk-copy when merging stored fields */ private final static int MAX_RAW_MERGE_DOCS = 4192; @@ -127,22 +121,6 @@ * @throws IOException if there is a low-level IO error */ final int merge() throws CorruptIndexException, IOException { - return merge(true); - } - - /** - * Merges the readers specified by the {@link #add} method - * into the directory passed to the constructor. - * @param mergeDocStores if false, we will not merge the - * stored fields nor vectors files - * @return The number of documents that were merged - * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error - */ - final int merge(boolean mergeDocStores) throws CorruptIndexException, IOException { - - this.mergeDocStores = mergeDocStores; - // NOTE: it's important to add calls to // checkAbort.work(...) if you make any changes to this // method that will spend alot of time. 
The frequency @@ -154,7 +132,7 @@ mergeTerms(); mergeNorms(); - if (mergeDocStores && fieldInfos.hasVectors()) + if (fieldInfos.hasVectors()) mergeVectors(); return mergedDocs; @@ -179,9 +157,7 @@ // Basic files for (String ext : IndexFileNames.COMPOUND_EXTENSIONS_NOT_CODEC) { - if (mergeDocStores || (!ext.equals(IndexFileNames.FIELDS_EXTENSION) && - !ext.equals(IndexFileNames.FIELDS_INDEX_EXTENSION))) - fileSet.add(IndexFileNames.segmentFileName(segment, "", ext)); + fileSet.add(IndexFileNames.segmentFileName(segment, "", ext)); } codec.files(directory, info, fileSet); @@ -197,7 +173,7 @@ } // Vector files - if (fieldInfos.hasVectors() && mergeDocStores) { + if (fieldInfos.hasVectors()) { for (String ext : IndexFileNames.VECTOR_EXTENSIONS) { fileSet.add(IndexFileNames.segmentFileName(segment, "", ext)); } @@ -270,17 +246,20 @@ */ private final int mergeFields() throws CorruptIndexException, IOException { - if (!mergeDocStores) { - // When we are not merging by doc stores, their field - // name -> number mapping are the same. So, we start - // with the fieldInfos of the last segment in this - // case, to keep that numbering. - final SegmentReader sr = (SegmentReader) readers.get(readers.size()-1); - fieldInfos = (FieldInfos) sr.core.fieldInfos.clone(); - } else { - fieldInfos = new FieldInfos(); // merge field names - } + //nocommit +// if (!mergeDocStores) { +// // When we are not merging by doc stores, their field +// // name -> number mapping are the same. So, we start +// // with the fieldInfos of the last segment in this +// // case, to keep that numbering. +// final SegmentReader sr = (SegmentReader) readers.get(readers.size()-1); +// fieldInfos = (FieldInfos) sr.core.fieldInfos.clone(); +// } else { +// fieldInfos = new FieldInfos(); // merge field names +// } + fieldInfos = new FieldInfos(); // merge field names + for (IndexReader reader : readers) { if (reader instanceof SegmentReader) { SegmentReader segmentReader = (SegmentReader) reader; @@ -310,55 +289,46 @@ setMatchingSegmentReaders(); - if (mergeDocStores) { - // merge field values - final FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos); + // merge field values + final FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos); - try { - int idx = 0; - for (IndexReader reader : readers) { - final SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++]; - FieldsReader matchingFieldsReader = null; - if (matchingSegmentReader != null) { - final FieldsReader fieldsReader = matchingSegmentReader.getFieldsReader(); - if (fieldsReader != null && fieldsReader.canReadRawDocs()) { - matchingFieldsReader = fieldsReader; - } + try { + int idx = 0; + for (IndexReader reader : readers) { + final SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++]; + FieldsReader matchingFieldsReader = null; + if (matchingSegmentReader != null) { + final FieldsReader fieldsReader = matchingSegmentReader.getFieldsReader(); + if (fieldsReader != null && fieldsReader.canReadRawDocs()) { + matchingFieldsReader = fieldsReader; } - if (reader.hasDeletions()) { - docCount += copyFieldsWithDeletions(fieldsWriter, - reader, matchingFieldsReader); - } else { - docCount += copyFieldsNoDeletions(fieldsWriter, + } + if (reader.hasDeletions()) { + docCount += copyFieldsWithDeletions(fieldsWriter, reader, matchingFieldsReader); - } + } else { + docCount += copyFieldsNoDeletions(fieldsWriter, + reader, matchingFieldsReader); } - } finally { - fieldsWriter.close(); } + } finally { + 
fieldsWriter.close(); + } - final String fileName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_INDEX_EXTENSION); - final long fdxFileLength = directory.fileLength(fileName); + final String fileName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_INDEX_EXTENSION); + final long fdxFileLength = directory.fileLength(fileName); - if (4+((long) docCount)*8 != fdxFileLength) - // This is most likely a bug in Sun JRE 1.6.0_04/_05; - // we detect that the bug has struck, here, and - // throw an exception to prevent the corruption from - // entering the index. See LUCENE-1282 for - // details. - throw new RuntimeException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.fileExists(fileName) + "; now aborting this merge to prevent index corruption"); - - } else { - // If we are skipping the doc stores, that means there - // are no deletions in any of these segments, so we - // just sum numDocs() of each segment to get total docCount - for (final IndexReader reader : readers) { - docCount += reader.numDocs(); - } + if (4+((long) docCount)*8 != fdxFileLength) { + // This is most likely a bug in Sun JRE 1.6.0_04/_05; + // we detect that the bug has struck, here, and + // throw an exception to prevent the corruption from + // entering the index. See LUCENE-1282 for + // details. + throw new RuntimeException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.fileExists(fileName) + "; now aborting this merge to prevent index corruption"); } + + segmentWriteState = new SegmentWriteState(null, directory, segment, fieldInfos, docCount, termIndexInterval, codecs); - segmentWriteState = new SegmentWriteState(null, directory, segment, fieldInfos, null, docCount, 0, termIndexInterval, codecs); - return docCount; } Index: src/java/org/apache/lucene/index/DocFieldProcessor.java =================================================================== --- src/java/org/apache/lucene/index/DocFieldProcessor.java (revision 966818) +++ src/java/org/apache/lucene/index/DocFieldProcessor.java (working copy) @@ -26,8 +26,6 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Fieldable; -import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.RamUsageEstimator; /** @@ -105,8 +103,11 @@ } } - fieldsWriter.abort(); - consumer.abort(); + try { + fieldsWriter.abort(); + } finally { + consumer.abort(); + } } @Override @@ -190,7 +191,7 @@ } @Override - public DocumentsWriterPerThread.DocWriter processDocument() throws IOException { + public void processDocument() throws IOException { consumer.startDocument(); fieldsWriter.startDocument(); @@ -287,24 +288,18 @@ docState.infoStream.println("WARNING: document contains at least one immense term (whose UTF8 encoding is longer than the max length " + DocumentsWriterRAMAllocator.MAX_TERM_LENGTH_UTF8 + "), all of which were skipped. Please correct the analyzer to not produce such terms. 
The prefix of the first immense term is: '" + docState.maxTermPrefix + "...'"); docState.maxTermPrefix = null; } - - final DocumentsWriterPerThread.DocWriter one = fieldsWriter.finishDocument(); - final DocumentsWriterPerThread.DocWriter two = consumer.finishDocument(); - if (one == null) { - return two; - } else if (two == null) { - return one; - } else { - PerDoc both = getPerDoc(); - both.docID = docState.docID; - assert one.docID == docState.docID; - assert two.docID == docState.docID; - both.one = one; - both.two = two; - return both; + } + + @Override + void finishDocument() throws IOException { + try { + fieldsWriter.finishDocument(); + } finally { + consumer.finishDocument(); } } + void quickSort(DocFieldProcessorPerField[] array, int lo, int hi) { if (lo >= hi) return; @@ -365,65 +360,4 @@ quickSort(array, lo, left); quickSort(array, left + 1, hi); } - - PerDoc[] docFreeList = new PerDoc[1]; - int freeCount; - int allocCount; - - PerDoc getPerDoc() { - if (freeCount == 0) { - allocCount++; - if (allocCount > docFreeList.length) { - // Grow our free list up front to make sure we have - // enough space to recycle all outstanding PerDoc - // instances - assert allocCount == 1+docFreeList.length; - docFreeList = new PerDoc[ArrayUtil.oversize(allocCount, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; - } - return new PerDoc(); - } else - return docFreeList[--freeCount]; - } - - void freePerDoc(PerDoc perDoc) { - assert freeCount < docFreeList.length; - docFreeList[freeCount++] = perDoc; - } - - class PerDoc extends DocumentsWriterPerThread.DocWriter { - - DocumentsWriterPerThread.DocWriter one; - DocumentsWriterPerThread.DocWriter two; - - @Override - public long sizeInBytes() { - return one.sizeInBytes() + two.sizeInBytes(); - } - - @Override - public void finish() throws IOException { - try { - try { - one.finish(); - } finally { - two.finish(); - } - } finally { - freePerDoc(this); - } - } - - @Override - public void abort() { - try { - try { - one.abort(); - } finally { - two.abort(); - } - } finally { - freePerDoc(this); - } - } - } } Index: src/java/org/apache/lucene/index/SegmentInfo.java =================================================================== --- src/java/org/apache/lucene/index/SegmentInfo.java (revision 966819) +++ src/java/org/apache/lucene/index/SegmentInfo.java (working copy) @@ -70,11 +70,11 @@ long sizeInBytes = -1; // total byte size of all of our files (computed on demand) - private int docStoreOffset; // if this segment shares stored fields & vectors, this + @Deprecated private int docStoreOffset; // if this segment shares stored fields & vectors, this // offset is where in that file this segment's docs begin - private String docStoreSegment; // name used to derive fields/vectors file we share with + @Deprecated private String docStoreSegment; // name used to derive fields/vectors file we share with // other segments - private boolean docStoreIsCompoundFile; // whether doc store files are stored in compound file (*.cfx) + @Deprecated private boolean docStoreIsCompoundFile; // whether doc store files are stored in compound file (*.cfx) private int delCount; // How many deleted docs in this segment @@ -87,20 +87,23 @@ private Map diagnostics; - public SegmentInfo(String name, int docCount, Directory dir, boolean isCompoundFile, int docStoreOffset, - String docStoreSegment, boolean docStoreIsCompoundFile, boolean hasProx, Codec codec) { + public SegmentInfo(String name, int docCount, Directory dir, boolean isCompoundFile, boolean hasProx, Codec codec) { + 
this(name, docCount, dir, isCompoundFile, -1, null, false, hasProx, codec); + } + + private SegmentInfo(String name, int docCount, Directory dir, boolean isCompoundFile, int docStoreOffset, + String docStoreSegment, boolean docStoreIsCompoundFile, boolean hasProx, Codec codec) { this.name = name; this.docCount = docCount; this.dir = dir; delGen = NO; this.isCompoundFile = isCompoundFile; - this.docStoreOffset = docStoreOffset; - this.docStoreSegment = docStoreSegment; - this.docStoreIsCompoundFile = docStoreIsCompoundFile; this.hasProx = hasProx; this.codec = codec; delCount = 0; - assert docStoreOffset == -1 || docStoreSegment != null: "dso=" + docStoreOffset + " dss=" + docStoreSegment + " docCount=" + docCount; + this.docStoreOffset = docStoreOffset; + this.docStoreIsCompoundFile = docStoreIsCompoundFile; + this.docStoreSegment = docStoreSegment; } /** @@ -176,11 +179,13 @@ docStoreSegment = name; docStoreIsCompoundFile = false; } + if (format > DefaultSegmentInfosWriter.FORMAT_4_0) { // pre-4.0 indexes write a byte if there is a single norms file byte b = input.readByte(); assert 1 == b; } + int numNormGen = input.readInt(); if (numNormGen == NO) { normGen = null; @@ -367,32 +372,24 @@ assert delCount <= docCount; } + @Deprecated public int getDocStoreOffset() { return docStoreOffset; } + @Deprecated public boolean getDocStoreIsCompoundFile() { return docStoreIsCompoundFile; } - void setDocStoreIsCompoundFile(boolean v) { - docStoreIsCompoundFile = v; - clearFiles(); - } - + @Deprecated public String getDocStoreSegment() { return docStoreSegment; } - - void setDocStoreOffset(int offset) { - docStoreOffset = offset; - clearFiles(); - } - void setDocStore(int offset, String segment, boolean isCompoundFile) { - docStoreOffset = offset; - docStoreSegment = segment; - docStoreIsCompoundFile = isCompoundFile; + @Deprecated + public void setDocStoreSegment(String docStoreSegment) { + this.docStoreSegment = docStoreSegment; clearFiles(); } @@ -403,12 +400,14 @@ output.writeString(name); output.writeInt(docCount); output.writeLong(delGen); + output.writeInt(docStoreOffset); if (docStoreOffset != -1) { output.writeString(docStoreSegment); output.writeByte((byte) (docStoreIsCompoundFile ? 
1:0)); } + if (normGen == null) { output.writeInt(NO); } else { Index: src/java/org/apache/lucene/index/FreqProxTermsWriter.java =================================================================== --- src/java/org/apache/lucene/index/FreqProxTermsWriter.java (revision 966818) +++ src/java/org/apache/lucene/index/FreqProxTermsWriter.java (working copy) @@ -20,14 +20,10 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; -import java.util.Comparator; import java.util.List; import java.util.Map; -import org.apache.lucene.index.DocumentsWriterPerThread.DocWriter; import org.apache.lucene.index.codecs.FieldsConsumer; -import org.apache.lucene.index.codecs.PostingsConsumer; -import org.apache.lucene.index.codecs.TermsConsumer; import org.apache.lucene.util.BytesRef; final class FreqProxTermsWriter extends TermsHashConsumer { @@ -112,8 +108,7 @@ } @Override - DocWriter finishDocument() throws IOException { - return null; + void finishDocument() throws IOException { } @Override Index: src/java/org/apache/lucene/index/InvertedDocConsumer.java =================================================================== --- src/java/org/apache/lucene/index/InvertedDocConsumer.java (revision 966818) +++ src/java/org/apache/lucene/index/InvertedDocConsumer.java (working copy) @@ -35,7 +35,7 @@ abstract void startDocument() throws IOException; - abstract DocumentsWriterPerThread.DocWriter finishDocument() throws IOException; + abstract void finishDocument() throws IOException; /** Attempt to free RAM, returning true if any RAM was * freed */ Index: src/java/org/apache/lucene/index/DocFieldConsumer.java =================================================================== --- src/java/org/apache/lucene/index/DocFieldConsumer.java (revision 966818) +++ src/java/org/apache/lucene/index/DocFieldConsumer.java (working copy) @@ -44,7 +44,7 @@ abstract DocFieldConsumerPerField addField(FieldInfo fi); - abstract DocumentsWriterPerThread.DocWriter finishDocument() throws IOException; + abstract void finishDocument() throws IOException; void setFieldInfos(FieldInfos fieldInfos) { this.fieldInfos = fieldInfos; Index: src/java/org/apache/lucene/index/DocumentsWriterRAMAllocator.java =================================================================== --- src/java/org/apache/lucene/index/DocumentsWriterRAMAllocator.java (revision 966818) +++ src/java/org/apache/lucene/index/DocumentsWriterRAMAllocator.java (working copy) @@ -4,6 +4,7 @@ import java.util.ArrayList; import java.util.List; +import org.apache.lucene.store.RAMFile; import org.apache.lucene.util.Constants; class DocumentsWriterRAMAllocator { @@ -145,4 +146,41 @@ return nf.format(v/1024./1024.); } + /** + * Create and return a new PerDocBuffer. + */ + PerDocBuffer newPerDocBuffer() { + return new PerDocBuffer(); + } + + /** + * RAMFile buffer for DocWriters. + */ + class PerDocBuffer extends RAMFile { + + /** + * Allocate bytes used from shared pool. + */ + protected byte[] newBuffer(int size) { + assert size == DocumentsWriterRAMAllocator.PER_DOC_BLOCK_SIZE; + return perDocAllocator.getByteBlock(); + } + + /** + * Recycle the bytes used.
+ */ + synchronized void recycle() { + if (buffers.size() > 0) { + setLength(0); + + // Recycle the blocks + perDocAllocator.recycleByteBlocks(buffers); + buffers.clear(); + sizeInBytes = 0; + + assert numBuffers() == 0; + } + } + } + } Index: src/java/org/apache/lucene/index/DocFieldConsumers.java =================================================================== --- src/java/org/apache/lucene/index/DocFieldConsumers.java (revision 966818) +++ src/java/org/apache/lucene/index/DocFieldConsumers.java (working copy) @@ -21,9 +21,6 @@ import java.util.HashMap; import java.util.Map; -import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.RamUsageEstimator; - /** This is just a "splitter" class: it lets you wrap two * DocFieldConsumer instances as a single consumer. */ @@ -86,83 +83,12 @@ return any; } - PerDoc[] docFreeList = new PerDoc[1]; - int freeCount; - int allocCount; - - PerDoc getPerDoc() { - if (freeCount == 0) { - allocCount++; - if (allocCount > docFreeList.length) { - // Grow our free list up front to make sure we have - // enough space to recycle all outstanding PerDoc - // instances - assert allocCount == 1+docFreeList.length; - docFreeList = new PerDoc[ArrayUtil.oversize(allocCount, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; - } - return new PerDoc(); - } else - return docFreeList[--freeCount]; - } - - void freePerDoc(PerDoc perDoc) { - assert freeCount < docFreeList.length; - docFreeList[freeCount++] = perDoc; - } - - class PerDoc extends DocumentsWriterPerThread.DocWriter { - - DocumentsWriterPerThread.DocWriter writerOne; - DocumentsWriterPerThread.DocWriter writerTwo; - - @Override - public long sizeInBytes() { - return writerOne.sizeInBytes() + writerTwo.sizeInBytes(); - } - - @Override - public void finish() throws IOException { - try { - try { - writerOne.finish(); - } finally { - writerTwo.finish(); - } - } finally { - freePerDoc(this); - } - } - - @Override - public void abort() { - try { - try { - writerOne.abort(); - } finally { - writerTwo.abort(); - } - } finally { - freePerDoc(this); - } - } - } - @Override - public DocumentsWriterPerThread.DocWriter finishDocument() throws IOException { - final DocumentsWriterPerThread.DocWriter oneDoc = one.finishDocument(); - final DocumentsWriterPerThread.DocWriter twoDoc = two.finishDocument(); - if (oneDoc == null) - return twoDoc; - else if (twoDoc == null) - return oneDoc; - else { - DocFieldConsumers.PerDoc both = getPerDoc(); - both.docID = docState.docID; - assert oneDoc.docID == docState.docID; - assert twoDoc.docID == docState.docID; - both.writerOne = oneDoc; - both.writerTwo = twoDoc; - return both; + public void finishDocument() throws IOException { + try { + one.finishDocument(); + } finally { + two.finishDocument(); } } Index: src/java/org/apache/lucene/index/TermsHashConsumer.java =================================================================== --- src/java/org/apache/lucene/index/TermsHashConsumer.java (revision 966818) +++ src/java/org/apache/lucene/index/TermsHashConsumer.java (working copy) @@ -26,7 +26,7 @@ abstract void closeDocStore(SegmentWriteState state) throws IOException; abstract void startDocument() throws IOException; - abstract DocumentsWriterPerThread.DocWriter finishDocument() throws IOException; + abstract void finishDocument() throws IOException; abstract public TermsHashConsumerPerField addField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo); FieldInfos fieldInfos; Index: src/java/org/apache/lucene/index/MergePolicy.java 
=================================================================== --- src/java/org/apache/lucene/index/MergePolicy.java (revision 966818) +++ src/java/org/apache/lucene/index/MergePolicy.java (working copy) @@ -67,7 +67,6 @@ public static class OneMerge { SegmentInfo info; // used by IndexWriter - boolean mergeDocStores; // used by IndexWriter boolean optimize; // used by IndexWriter boolean increfDone; // used by IndexWriter boolean registerDone; // used by IndexWriter @@ -157,9 +156,6 @@ b.append(" into ").append(info.name); if (optimize) b.append(" [optimize]"); - if (mergeDocStores) { - b.append(" [mergeDocStores]"); - } return b.toString(); } } Index: src/java/org/apache/lucene/index/StoredFieldsWriter.java =================================================================== --- src/java/org/apache/lucene/index/StoredFieldsWriter.java (revision 966818) +++ src/java/org/apache/lucene/index/StoredFieldsWriter.java (working copy) @@ -33,12 +33,10 @@ final DocumentsWriterPerThread docWriter; final FieldInfos fieldInfos; int lastDocID; - private String docStoreSegment; + private String segment; - PerDoc[] docFreeList = new PerDoc[1]; int freeCount; - PerDoc doc; final DocumentsWriterPerThread.DocState docState; public StoredFieldsWriter(DocumentsWriterPerThread docWriter, FieldInfos fieldInfos) { @@ -47,21 +45,26 @@ this.docState = docWriter.docState; localFieldsWriter = new FieldsWriter((IndexOutput) null, (IndexOutput) null, fieldInfos); } + + DocumentsWriterRAMAllocator.PerDocBuffer buffer; + RAMOutputStream fdt; + int numStoredFields; - public void startDocument() { - if (doc != null) { - // Only happens if previous document hit non-aborting - // exception while writing stored fields into - // localFieldsWriter: - doc.reset(); - doc.docID = docState.docID; + public void reset() { + if (fdt != null) { + fdt.reset(); + buffer.recycle(); + numStoredFields = 0; } } - + public void startDocument() { + reset(); + } + public void flush(SegmentWriteState state) throws IOException { - if (state.numDocsInStore > 0) { + if (state.numDocs > 0) { // It's possible that all documents seen in this segment // hit non-aborting exceptions, in which case we will // not have yet init'd the FieldsWriter: @@ -69,86 +72,55 @@ // Fill fdx file to include any final docs that we // skipped because they hit non-aborting exceptions - fill(state.numDocsInStore - docWriter.getDocStoreOffset()); + fill(state.numDocs); } if (fieldsWriter != null) fieldsWriter.flush(); } - private synchronized void initFieldsWriter() throws IOException { + private void initFieldsWriter() throws IOException { if (fieldsWriter == null) { - docStoreSegment = docWriter.getDocStoreSegment(); - if (docStoreSegment != null) { + segment = docWriter.getSegment(); + if (segment != null) { fieldsWriter = new FieldsWriter(docWriter.directory, - docStoreSegment, + segment, fieldInfos); - docWriter.addOpenFile(IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.FIELDS_EXTENSION)); - docWriter.addOpenFile(IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.FIELDS_INDEX_EXTENSION)); + docWriter.addOpenFile(IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_EXTENSION)); + docWriter.addOpenFile(IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_INDEX_EXTENSION)); lastDocID = 0; } } } public void closeDocStore(SegmentWriteState state) throws IOException { - final int inc = state.numDocsInStore - lastDocID; + final int inc = state.numDocs - lastDocID; if (inc > 0) { initFieldsWriter(); - 
fill(state.numDocsInStore - docWriter.getDocStoreOffset()); + fill(state.numDocs); } if (fieldsWriter != null) { fieldsWriter.close(); fieldsWriter = null; - assert docStoreSegment != null; - assert state.docStoreSegmentName != null; - assert docStoreSegment.equals(state.docStoreSegmentName): "fieldsWriter wrote to segment=" + docStoreSegment + " vs SegmentWriteState segment=" + state.docStoreSegmentName; lastDocID = 0; - String fieldsName = IndexFileNames.segmentFileName(state.docStoreSegmentName, "", IndexFileNames.FIELDS_EXTENSION); - String fieldsIdxName = IndexFileNames.segmentFileName(state.docStoreSegmentName, "", IndexFileNames.FIELDS_INDEX_EXTENSION); + String fieldsName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.FIELDS_EXTENSION); + String fieldsIdxName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.FIELDS_INDEX_EXTENSION); state.flushedFiles.add(fieldsName); state.flushedFiles.add(fieldsIdxName); docWriter.removeOpenFile(fieldsName); docWriter.removeOpenFile(fieldsIdxName); - if (4+((long) state.numDocsInStore)*8 != state.directory.fileLength(fieldsIdxName)) - throw new RuntimeException("after flush: fdx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.fileLength(fieldsIdxName) + " length in bytes of " + fieldsIdxName + " file exists?=" + state.directory.fileExists(fieldsIdxName)); + if (4+((long) state.numDocs)*8 != state.directory.fileLength(fieldsIdxName)) + throw new RuntimeException("after flush: fdx size mismatch: " + state.numDocs + " docs vs " + state.directory.fileLength(fieldsIdxName) + " length in bytes of " + fieldsIdxName + " file exists?=" + state.directory.fileExists(fieldsIdxName)); } } int allocCount; - PerDoc getPerDoc() { - if (freeCount == 0) { - allocCount++; - if (allocCount > docFreeList.length) { - // Grow our free list up front to make sure we have - // enough space to recycle all outstanding PerDoc - // instances - assert allocCount == 1+docFreeList.length; - docFreeList = new PerDoc[ArrayUtil.oversize(allocCount, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; - } - return new PerDoc(); - } else - return docFreeList[--freeCount]; - } - - public DocumentsWriterPerThread.DocWriter finishDocument() { - // If there were any stored fields in this doc, doc will - // be non-null; else it's null. 
- try { - return doc; - } finally { - doc = null; - } - } - void abort() { - if (doc != null) { - doc.abort(); - doc = null; - } + reset(); if (fieldsWriter != null) { try { @@ -162,83 +134,47 @@ /** Fills in any hole in the docIDs */ void fill(int docID) throws IOException { - final int docStoreOffset = docWriter.getDocStoreOffset(); - // We must "catch up" for all docs before us // that had no stored fields: - final int end = docID+docStoreOffset; + final int end = docID; while(lastDocID < end) { fieldsWriter.skipDocument(); lastDocID++; } } - void finishDocument(PerDoc perDoc) throws IOException { + void finishDocument() throws IOException { assert docWriter.writer.testPoint("StoredFieldsWriter.finishDocument start"); - initFieldsWriter(); - fill(perDoc.docID); - - // Append stored fields to the real FieldsWriter: - fieldsWriter.flushDocument(perDoc.numStoredFields, perDoc.fdt); - lastDocID++; - perDoc.reset(); - free(perDoc); + initFieldsWriter(); + + if (fdt != null) { + fill(docState.docID); + + // Append stored fields to the real FieldsWriter: + fieldsWriter.flushDocument(numStoredFields, fdt); + lastDocID++; + reset(); + } assert docWriter.writer.testPoint("StoredFieldsWriter.finishDocument end"); } public void addField(Fieldable field, FieldInfo fieldInfo) throws IOException { - if (doc == null) { - doc = getPerDoc(); - doc.docID = docState.docID; - localFieldsWriter.setFieldsStream(doc.fdt); - assert doc.numStoredFields == 0: "doc.numStoredFields=" + doc.numStoredFields; - assert 0 == doc.fdt.length(); - assert 0 == doc.fdt.getFilePointer(); + if (fdt == null) { + buffer = docWriter.ramAllocator.newPerDocBuffer(); + fdt = new RAMOutputStream(buffer); + localFieldsWriter.setFieldsStream(fdt); + assert numStoredFields == 0: "doc.numStoredFields=" + numStoredFields; + assert 0 == fdt.length(); + assert 0 == fdt.getFilePointer(); } localFieldsWriter.writeField(fieldInfo, field); assert docState.testPoint("StoredFieldsWriterPerThread.processFields.writeField"); - doc.numStoredFields++; + numStoredFields++; } public boolean freeRAM() { return false; } - - void free(PerDoc perDoc) { - assert freeCount < docFreeList.length; - assert 0 == perDoc.numStoredFields; - assert 0 == perDoc.fdt.length(); - assert 0 == perDoc.fdt.getFilePointer(); - docFreeList[freeCount++] = perDoc; - } - - class PerDoc extends DocumentsWriterPerThread.DocWriter { - final DocumentsWriterPerThread.PerDocBuffer buffer = docWriter.newPerDocBuffer(); - RAMOutputStream fdt = new RAMOutputStream(buffer); - int numStoredFields; - - void reset() { - fdt.reset(); - buffer.recycle(); - numStoredFields = 0; - } - - @Override - public void abort() { - reset(); - free(this); - } - - @Override - public long sizeInBytes() { - return buffer.getSizeInBytes(); - } - - @Override - public void finish() throws IOException { - finishDocument(this); - } - } } Index: src/java/org/apache/lucene/index/TermVectorsTermsWriter.java =================================================================== --- src/java/org/apache/lucene/index/TermVectorsTermsWriter.java (revision 966818) +++ src/java/org/apache/lucene/index/TermVectorsTermsWriter.java (working copy) @@ -20,18 +20,15 @@ import java.io.IOException; import java.util.Map; -import org.apache.lucene.index.DocumentsWriterPerThread.DocWriter; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.RAMOutputStream; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.RamUsageEstimator; final class 
TermVectorsTermsWriter extends TermsHashConsumer { final DocumentsWriterPerThread docWriter; TermVectorsWriter termVectorsWriter; - PerDoc[] docFreeList = new PerDoc[1]; int freeCount; IndexOutput tvx; IndexOutput tvd; @@ -40,7 +37,6 @@ final DocumentsWriterPerThread.DocState docState; final BytesRef flushTerm = new BytesRef(); - TermVectorsTermsWriter.PerDoc doc; // Used by perField when serializing the term vectors final ByteSliceReader vectorSliceReader = new ByteSliceReader(); @@ -55,10 +51,10 @@ if (tvx != null) { - if (state.numDocsInStore > 0) + if (state.numDocs > 0) // In case there are some final documents that we // didn't see (because they hit a non-aborting exception): - fill(state.numDocsInStore - docWriter.getDocStoreOffset()); + fill(state.numDocs); tvx.flush(); tvd.flush(); @@ -77,18 +73,17 @@ if (tvx != null) { // At least one doc in this run had term vectors // enabled - fill(state.numDocsInStore - docWriter.getDocStoreOffset()); + fill(state.numDocs); tvx.close(); tvf.close(); tvd.close(); tvx = null; - assert state.docStoreSegmentName != null; - String idxName = IndexFileNames.segmentFileName(state.docStoreSegmentName, "", IndexFileNames.VECTORS_INDEX_EXTENSION); - if (4+((long) state.numDocsInStore)*16 != state.directory.fileLength(idxName)) - throw new RuntimeException("after flush: tvx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.fileLength(idxName) + " length in bytes of " + idxName + " file exists?=" + state.directory.fileExists(idxName)); + String idxName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.VECTORS_INDEX_EXTENSION); + if (4+((long) state.numDocs)*16 != state.directory.fileLength(idxName)) + throw new RuntimeException("after flush: tvx size mismatch: " + state.numDocs + " docs vs " + state.directory.fileLength(idxName) + " length in bytes of " + idxName + " file exists?=" + state.directory.fileExists(idxName)); - String fldName = IndexFileNames.segmentFileName(state.docStoreSegmentName, "", IndexFileNames.VECTORS_FIELDS_EXTENSION); - String docName = IndexFileNames.segmentFileName(state.docStoreSegmentName, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION); + String fldName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.VECTORS_FIELDS_EXTENSION); + String docName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION); state.flushedFiles.add(idxName); state.flushedFiles.add(fldName); state.flushedFiles.add(docName); @@ -101,28 +96,10 @@ } } - int allocCount; - - PerDoc getPerDoc() { - if (freeCount == 0) { - allocCount++; - if (allocCount > docFreeList.length) { - // Grow our free list up front to make sure we have - // enough space to recycle all outstanding PerDoc - // instances - assert allocCount == 1+docFreeList.length; - docFreeList = new PerDoc[ArrayUtil.oversize(allocCount, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; - } - return new PerDoc(); - } else - return docFreeList[--freeCount]; - } - /** Fills in no-term-vectors for all docs we haven't seen * since the last doc that had term vectors. 
*/ void fill(int docID) throws IOException { - final int docStoreOffset = docWriter.getDocStoreOffset(); - final int end = docID+docStoreOffset; + final int end = docID; if (lastDocID < end) { final long tvfPosition = tvf.getFilePointer(); while(lastDocID < end) { @@ -137,18 +114,18 @@ void initTermVectorsWriter() throws IOException { if (tvx == null) { - final String docStoreSegment = docWriter.getDocStoreSegment(); + final String segment = docWriter.getSegment(); - if (docStoreSegment == null) + if (segment == null) return; // If we hit an exception while init'ing the term // vector output files, we must abort this segment // because those files will be in an unknown // state: - String idxName = IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_INDEX_EXTENSION); - String docName = IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION); - String fldName = IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_FIELDS_EXTENSION); + String idxName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.VECTORS_INDEX_EXTENSION); + String docName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION); + String fldName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.VECTORS_FIELDS_EXTENSION); tvx = docWriter.directory.createOutput(idxName); tvd = docWriter.directory.createOutput(docName); tvf = docWriter.directory.createOutput(fldName); @@ -165,38 +142,38 @@ } } - void finishDocument(PerDoc perDoc) throws IOException { + @Override + void finishDocument() throws IOException { assert docWriter.writer.testPoint("TermVectorsTermsWriter.finishDocument start"); initTermVectorsWriter(); - fill(perDoc.docID); + fill(docState.docID); // Append term vectors to the real outputs: tvx.writeLong(tvd.getFilePointer()); tvx.writeLong(tvf.getFilePointer()); - tvd.writeVInt(perDoc.numVectorFields); - if (perDoc.numVectorFields > 0) { - for(int i=0;i 0) { + for(int i=0;i() { @Override public Long process(final DocumentsWriterPerThread perThread) throws IOException { @@ -156,11 +156,14 @@ if (finishAddDocument(perThread, perThreadRAMUsedBeforeAdd)) { super.clearThreadBindings(); - indexWriter.maybeMerge(); } return sequenceID; } }); + + indexWriter.maybeMerge(); + + return seqID; } private final boolean finishAddDocument(DocumentsWriterPerThread perThread, @@ -252,7 +255,7 @@ this.flushedSequenceID = newFlushedID; } - final boolean flushAllThreads(final boolean flushDocStores, final boolean flushDeletes) + final boolean flushAllThreads(final boolean flushDeletes) throws IOException { return threadPool.executeAllThreads(new DocumentsWriterThreadPool.AllThreadsTask() { @Override @@ -260,61 +263,29 @@ boolean anythingFlushed = false; if (flushDeletes) { - synchronized (indexWriter) { - if (applyDeletes(indexWriter.segmentInfos)) { - indexWriter.checkpoint(); - } + if (applyDeletes(indexWriter.segmentInfos)) { + indexWriter.checkpoint(); } } while (threadsIterator.hasNext()) { - boolean perThreadFlushDocStores = flushDocStores; DocumentsWriterPerThread perThread = threadsIterator.next(); final int numDocs = perThread.getNumDocsInRAM(); // Always flush docs if there are any boolean flushDocs = numDocs > 0; - String docStoreSegment = perThread.getDocStoreSegment(); - if (docStoreSegment == null) { - perThreadFlushDocStores = false; - } - int docStoreOffset = perThread.getDocStoreOffset(); - boolean docStoreIsCompoundFile = false; - if (perThreadFlushDocStores - && (!flushDocs || 
!perThread.getSegment().equals(perThread.getDocStoreSegment()))) { - // We must separately flush the doc store - if (infoStream != null) { - message(" flush shared docStore segment " + docStoreSegment); - } - docStoreIsCompoundFile = flushDocStores(perThread); - flushDocStores(perThread); - perThreadFlushDocStores = false; - } - String segment = perThread.getSegment(); // If we are flushing docs, segment must not be null: assert segment != null || !flushDocs; if (flushDocs) { - SegmentInfo newSegment = perThread.flush(perThreadFlushDocStores); + SegmentInfo newSegment = perThread.flush(); if (newSegment != null) { anythingFlushed = true; - if (0 == docStoreOffset && perThreadFlushDocStores) { - // This means we are flushing private doc stores - // with this segment, so it will not be shared - // with other segments - assert docStoreSegment != null; - assert docStoreSegment.equals(segment); - docStoreOffset = -1; - docStoreSegment = null; - docStoreIsCompoundFile = false; - } - newSegment.setDocStore(docStoreOffset, docStoreSegment, docStoreIsCompoundFile); - IndexWriter.setDiagnostics(newSegment, "flush"); finishFlushedSegment(newSegment, perThread); } @@ -361,112 +332,38 @@ synchronized(indexWriter) { indexWriter.segmentInfos.add(newSegment); indexWriter.checkpoint(); - SegmentReader reader = indexWriter.readerPool.get(newSegment, false); - boolean any = false; - try { - any = applyDeletes(reader, newSegment.getMinSequenceID(), newSegment.getMaxSequenceID(), perThread.sequenceIDs); - } finally { - indexWriter.readerPool.release(reader); - } - if (any) { - indexWriter.checkpoint(); - } - - if (indexWriter.mergePolicy.useCompoundFile(indexWriter.segmentInfos, newSegment)) { - // Now build compound file - boolean success = false; - try { - createCompoundFile(newSegment.name, perThread); - success = true; - } finally { - if (!success) { - if (infoStream != null) { - message("hit exception " + - "reating compound file for newly flushed segment " + newSegment.name); - } - indexWriter.deleter.deleteFile(IndexFileNames.segmentFileName(newSegment.name, "", - IndexFileNames.COMPOUND_FILE_EXTENSION)); - } - } - - newSegment.setUseCompoundFile(true); - indexWriter.checkpoint(); - } } - } + SegmentReader reader = indexWriter.readerPool.get(newSegment, false); + boolean any = false; + try { + any = applyDeletes(reader, newSegment.getMinSequenceID(), newSegment.getMaxSequenceID(), perThread.sequenceIDs); + } finally { + indexWriter.readerPool.release(reader); + } + if (any) { + indexWriter.checkpoint(); + } - - private boolean flushDocStores(DocumentsWriterPerThread perThread) throws IOException { - boolean useCompoundDocStore = false; - - String docStoreSegment; - + if (indexWriter.mergePolicy.useCompoundFile(indexWriter.segmentInfos, newSegment)) { + // Now build compound file boolean success = false; try { - docStoreSegment = perThread.closeDocStore(); + createCompoundFile(newSegment.name, perThread); success = true; } finally { - if (!success && infoStream != null) { - message("hit exception closing doc store segment"); - } - } - - useCompoundDocStore = indexWriter.mergePolicy.useCompoundDocStore(indexWriter.segmentInfos); - - if (useCompoundDocStore && docStoreSegment != null && perThread.closedFiles().size() != 0) { - // Now build compound doc store file - - if (infoStream != null) { - message("create compound file " - + IndexFileNames.segmentFileName(docStoreSegment, "", - IndexFileNames.COMPOUND_FILE_STORE_EXTENSION)); - } - - success = false; - - final int numSegments = 
indexWriter.segmentInfos.size(); - final String compoundFileName = IndexFileNames.segmentFileName(docStoreSegment, "", - IndexFileNames.COMPOUND_FILE_STORE_EXTENSION); - - try { - CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName); - for (final String file : perThread.closedFiles()) { - cfsWriter.addFile(file); + if (!success) { + if (infoStream != null) { + message("hit exception " + + "creating compound file for newly flushed segment " + newSegment.name); - } - - // Perform the merge - cfsWriter.close(); - success = true; - - } finally { - if (!success) { - if (infoStream != null) - message("hit exception building compound file doc store for segment " + docStoreSegment); - synchronized(indexWriter) { - indexWriter.deleter.deleteFile(compoundFileName); - } - abort(); - } + indexWriter.deleter.deleteFile(IndexFileNames.segmentFileName(newSegment.name, "", + IndexFileNames.COMPOUND_FILE_EXTENSION)); } - - synchronized(indexWriter) { - for (int i = 0; i < numSegments; i++) { - SegmentInfo si = indexWriter.segmentInfos.info(i); - if (si.getDocStoreOffset() != -1 && - si.getDocStoreSegment().equals(docStoreSegment)) - si.setDocStoreIsCompoundFile(true); - } - - indexWriter.checkpoint(); - - // In case the files we just merged into a CFS were - // not previously checkpointed: - indexWriter.deleter.deleteNewFiles(perThread.closedFiles()); - } } - - return useCompoundDocStore; - + + newSegment.setUseCompoundFile(true); + indexWriter.checkpoint(); + } } // Returns true if an abort is in progress @@ -495,7 +392,7 @@ private final boolean maybeFlushPerThread(DocumentsWriterPerThread perThread) throws IOException { if (perThread.getNumDocsInRAM() == maxBufferedDocs) { - flushSegment(perThread, false); + flushSegment(perThread); assert perThread.getNumDocsInRAM() == 0; return true; } @@ -503,18 +400,15 @@ return false; } - private boolean flushSegment(DocumentsWriterPerThread perThread, boolean flushDocStores) + private boolean flushSegment(DocumentsWriterPerThread perThread) throws IOException { - if (perThread.getNumDocsInRAM() == 0 && !flushDocStores) { + if (perThread.getNumDocsInRAM() == 0) { return false; } - int docStoreOffset = perThread.getDocStoreOffset(); - String docStoreSegment = perThread.getDocStoreSegment(); - SegmentInfo newSegment = perThread.flush(flushDocStores); + SegmentInfo newSegment = perThread.flush(); if (newSegment != null) { - newSegment.setDocStore(docStoreOffset, docStoreSegment, false); finishFlushedSegment(newSegment, perThread); return true; } @@ -580,41 +474,39 @@ // } boolean applyDeletes(SegmentInfos infos) throws IOException { - synchronized(indexWriter) { - if (!hasDeletes()) - return false; - - final long t0 = System.currentTimeMillis(); - - if (infoStream != null) { - message("apply " + deletesInRAM.getNumDeletes() + " buffered deletes on " + - +infos.size() + " segments."); + if (!hasDeletes()) + return false; + + final long t0 = System.currentTimeMillis(); + + if (infoStream != null) { + message("apply " + deletesInRAM.getNumDeletes() + " buffered deletes on " + + +infos.size() + " segments."); + } + + final int infosEnd = infos.size(); + + boolean any = false; + for (int i = 0; i < infosEnd; i++) { + + // Make sure we never attempt to apply deletes to + // segment in external dir + assert infos.info(i).dir == directory; + + SegmentInfo si = infos.info(i); + SegmentReader reader = indexWriter.readerPool.get(si, false); + try { + any |= applyDeletes(reader, si.getMinSequenceID(), si.getMaxSequenceID(), null); + } finally {
indexWriter.readerPool.release(reader); } - - final int infosEnd = infos.size(); - - boolean any = false; - for (int i = 0; i < infosEnd; i++) { - - // Make sure we never attempt to apply deletes to - // segment in external dir - assert infos.info(i).dir == directory; - - SegmentInfo si = infos.info(i); - SegmentReader reader = indexWriter.readerPool.get(si, false); - try { - any |= applyDeletes(reader, si.getMinSequenceID(), si.getMaxSequenceID(), null); - } finally { - indexWriter.readerPool.release(reader); - } - } - - if (infoStream != null) { - message("apply deletes took " + (System.currentTimeMillis() - t0) + " msec"); - } - - return any; } + + if (infoStream != null) { + message("apply deletes took " + (System.currentTimeMillis() - t0) + " msec"); + } + + return any; } // Apply buffered delete terms, queries and docIDs to the Index: src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java =================================================================== --- src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java (revision 966819) +++ src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java (working copy) @@ -68,15 +68,7 @@ } if (doVectors) { - if (termsWriter.doc == null) { - termsWriter.doc = termsWriter.getPerDoc(); - termsWriter.doc.docID = docState.docID; - assert termsWriter.doc.numVectorFields == 0; - assert 0 == termsWriter.doc.perDocTvf.length(); - assert 0 == termsWriter.doc.perDocTvf.getFilePointer(); - } else { - assert termsWriter.doc.docID == docState.docID; - + if (termsWriter.buffer != null) { if (termsHashPerField.numPostings != 0) // Only necessary if previous doc hit a // non-aborting exception while writing vectors in @@ -114,7 +106,7 @@ if (numPostings > maxNumPostings) maxNumPostings = numPostings; - final IndexOutput tvf = termsWriter.doc.perDocTvf; + final IndexOutput tvf = termsWriter.perDocTvf; // This is called once, after inverting all occurrences // of a given field in the doc. 
At this point we flush @@ -123,7 +115,7 @@ assert fieldInfo.storeTermVector; assert termsWriter.vectorFieldsInOrder(fieldInfo); - termsWriter.doc.addField(termsHashPerField.fieldInfo.number); + termsWriter.addField(termsHashPerField.fieldInfo.number); TermVectorsPostingsArray postings = (TermVectorsPostingsArray) termsHashPerField.postingsArray; // TODO: we may want to make this sort in same order Index: src/java/org/apache/lucene/index/IndexWriter.java =================================================================== --- src/java/org/apache/lucene/index/IndexWriter.java (revision 966818) +++ src/java/org/apache/lucene/index/IndexWriter.java (working copy) @@ -413,7 +413,7 @@ // this method is called: poolReaders = true; - flush(true, true, false); + flush(true, false); // Prevent segmentInfos from changing while opening the // reader; in theory we could do similar retry logic, @@ -1440,7 +1440,7 @@ // Only allow a new merge to be triggered if we are // going to wait for merges: if (!hitOOM) { - flush(waitForMerges, true, true); + flush(waitForMerges, true); } if (waitForMerges) @@ -1961,7 +1961,7 @@ if (infoStream != null) message("optimize: index now " + segString()); - flush(true, false, true); + flush(true, true); synchronized(this) { resetMergeExceptions(); @@ -2490,7 +2490,7 @@ try { if (infoStream != null) message("flush at addIndexes(Directory...)"); - flush(true, false, true); + flush(true, true); int docCount = 0; List infos = new ArrayList(); @@ -2537,7 +2537,7 @@ } // Update SI appropriately - info.setDocStore(info.getDocStoreOffset(), newDsName, info.getDocStoreIsCompoundFile()); + info.setDocStoreSegment(newDsName); info.dir = directory; info.name = newSegName; @@ -2595,8 +2595,7 @@ SegmentInfo info = null; synchronized(this) { - info = new SegmentInfo(mergedName, docCount, directory, false, -1, - null, false, merger.hasProx(), merger.getCodec()); + info = new SegmentInfo(mergedName, docCount, directory, false, merger.hasProx(), merger.getCodec()); setDiagnostics(info, "addIndexes(IndexReader...)"); segmentInfos.add(info); checkpoint(); @@ -2705,7 +2704,7 @@ if (infoStream != null) message("prepareCommit: flush"); - flush(true, true, true); + flush(true, true); startCommit(0, commitUserData); } @@ -2826,18 +2825,18 @@ * @param flushDeletes whether pending deletes should also * be flushed */ - protected final void flush(boolean triggerMerge, boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException { + protected final void flush(boolean triggerMerge, boolean flushDeletes) throws CorruptIndexException, IOException { // We can be called during close, when closing==true, so we must pass false to ensureOpen: ensureOpen(false); - if (doFlush(flushDocStores, flushDeletes) && triggerMerge) + if (doFlush(flushDeletes) && triggerMerge) maybeMerge(); } // TODO: this method should not have to be entirely // synchronized, ie, merges should be allowed to commit // even while a flush is happening - private synchronized final boolean doFlush(boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException { - return docWriter.flushAllThreads(flushDocStores, flushDeletes); + private synchronized final boolean doFlush(boolean flushDeletes) throws CorruptIndexException, IOException { + return docWriter.flushAllThreads(flushDeletes); // nocommit // try { // try { @@ -2998,7 +2997,6 @@ // nocommit //docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount); - setMergeDocStoreIsCompoundFile(merge); 
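A minimal usage sketch (hypothetical caller, not taken from the patch): the flush hunks above drop the flushDocStores flag, so same-package callers of the protected IndexWriter.flush now pass only triggerMerge and flushDeletes, since doc stores are always written together with their owning segment.

    // before this patch: flush(triggerMerge, flushDocStores, flushDeletes)
    //   writer.flush(true, false, true);
    // after this patch: the separate doc-store flag is gone
    writer.flush(true /*triggerMerge*/, true /*flushDeletes*/);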
merge.info.setHasProx(merger.hasProx()); segmentInfos.subList(start, start + merge.segments.size()).clear(); @@ -3199,108 +3197,13 @@ final SegmentInfos sourceSegments = merge.segments; final int end = sourceSegments.size(); - // Check whether this merge will allow us to skip - // merging the doc stores (stored field & vectors). - // This is a very substantial optimization (saves tons - // of IO). - - Directory lastDir = directory; - String lastDocStoreSegment = null; - int next = -1; - - boolean mergeDocStores = false; - boolean doFlushDocStore = false; - // nocommit - //final String currentDocStoreSegment = docWriter.getDocStoreSegment(); - - // Test each segment to be merged: check if we need to - // flush/merge doc stores - for (int i = 0; i < end; i++) { - SegmentInfo si = sourceSegments.info(i); - - // If it has deletions we must merge the doc stores - if (si.hasDeletions()) - mergeDocStores = true; - - // If it has its own (private) doc stores we must - // merge the doc stores - if (-1 == si.getDocStoreOffset()) - mergeDocStores = true; - - // If it has a different doc store segment than - // previous segments, we must merge the doc stores - String docStoreSegment = si.getDocStoreSegment(); - if (docStoreSegment == null) - mergeDocStores = true; - else if (lastDocStoreSegment == null) - lastDocStoreSegment = docStoreSegment; - else if (!lastDocStoreSegment.equals(docStoreSegment)) - mergeDocStores = true; - - // Segments' docScoreOffsets must be in-order, - // contiguous. For the default merge policy now - // this will always be the case but for an arbitrary - // merge policy this may not be the case - if (-1 == next) - next = si.getDocStoreOffset() + si.docCount; - else if (next != si.getDocStoreOffset()) - mergeDocStores = true; - else - next = si.getDocStoreOffset() + si.docCount; - - // If the segment comes from a different directory - // we must merge - if (lastDir != si.dir) - mergeDocStores = true; - - // If the segment is referencing the current "live" - // doc store outputs then we must merge - // nocommit -// if (si.getDocStoreOffset() != -1 && currentDocStoreSegment != null && si.getDocStoreSegment().equals(currentDocStoreSegment)) { -// doFlushDocStore = true; -// } - } - - final int docStoreOffset; - final String docStoreSegment; - final boolean docStoreIsCompoundFile; - - if (mergeDocStores) { - docStoreOffset = -1; - docStoreSegment = null; - docStoreIsCompoundFile = false; - } else { - SegmentInfo si = sourceSegments.info(0); - docStoreOffset = si.getDocStoreOffset(); - docStoreSegment = si.getDocStoreSegment(); - docStoreIsCompoundFile = si.getDocStoreIsCompoundFile(); - } - - if (mergeDocStores && doFlushDocStore) { - // SegmentMerger intends to merge the doc stores - // (stored fields, vectors), and at least one of the - // segments to be merged refers to the currently - // live doc stores. - - // TODO: if we know we are about to merge away these - // newly flushed doc store files then we should not - // make compound file out of them... - if (infoStream != null) - message("now flush at merge"); - doFlush(true, false); - } - merge.increfDone = true; - merge.mergeDocStores = mergeDocStores; - // Bind a new segment name here so even with // ConcurrentMergePolicy we keep deterministic segment // names. 
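A minimal sketch (not taken from the patch) of the narrowed SegmentInfo constructor used in the hunk that follows: the docStoreOffset, docStoreSegment and docStoreIsCompoundFile arguments disappear, leaving name, docCount, dir, isCompoundFile, hasProx and codec.

    // before: new SegmentInfo(name, docCount, dir, isCompoundFile,
    //                         docStoreOffset, docStoreSegment, docStoreIsCompoundFile,
    //                         hasProx, codec)
    // after (mirrors the merge.info assignment below; hasProx=false, codec=null here):
    SegmentInfo info = new SegmentInfo(newSegmentName(), 0, directory, false, false, null);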
merge.info = new SegmentInfo(newSegmentName(), 0, - directory, false, docStoreOffset, - docStoreSegment, - docStoreIsCompoundFile, + directory, false, false, null); @@ -3308,7 +3211,6 @@ Map details = new HashMap(); details.put("optimize", Boolean.toString(merge.optimize)); details.put("mergeFactor", Integer.toString(end)); - details.put("mergeDocStores", Boolean.toString(mergeDocStores)); setDiagnostics(merge.info, "merge", details); // Also enroll the merged segment into mergingSegments; @@ -3368,23 +3270,6 @@ runningMerges.remove(merge); } - private synchronized void setMergeDocStoreIsCompoundFile(MergePolicy.OneMerge merge) { - final String mergeDocStoreSegment = merge.info.getDocStoreSegment(); - if (mergeDocStoreSegment != null && !merge.info.getDocStoreIsCompoundFile()) { - final int size = segmentInfos.size(); - for(int i=0;i dss = new HashSet(); - // This is try/finally to make sure merger's readers are // closed: boolean success = false; @@ -3426,7 +3307,7 @@ // Hold onto the "live" reader; we will use this to // commit merged deletes - SegmentReader reader = merge.readers[i] = readerPool.get(info, merge.mergeDocStores, + SegmentReader reader = merge.readers[i] = readerPool.get(info, true, MERGE_READ_BUFFER_SIZE, -config.getReaderTermsIndexDivisor()); @@ -3436,14 +3317,6 @@ SegmentReader clone = merge.readersClone[i] = (SegmentReader) reader.clone(true); merger.add(clone); - if (clone.hasDeletions()) { - mergeDocStores = true; - } - - if (info.getDocStoreOffset() != -1) { - dss.add(info.getDocStoreSegment()); - } - totDocCount += clone.numDocs(); } @@ -3453,40 +3326,12 @@ merge.checkAborted(directory); - // If deletions have arrived and it has now become - // necessary to merge doc stores, go and open them: - if (mergeDocStores && !merge.mergeDocStores) { - merge.mergeDocStores = true; - synchronized(this) { - - // If 1) we must now merge doc stores, and 2) at - // least one of the segments we are merging uses - // the doc store we are now writing to, we must at - // this point force this doc store closed (by - // calling flush). 
If we didn't do this then the - // readers will attempt to open an IndexInput - // on files that have still-open IndexOutputs - // against them: - // nocommit -// if (dss.contains(docWriter.getDocStoreSegment())) { -// if (infoStream != null) -// message("now flush at mergeMiddle"); -// doFlush(true, false); -// } - } - - for(int i=0;i flushedFiles; // Actual codec used @@ -61,16 +59,12 @@ public final int maxSkipLevels = 10; public SegmentWriteState(PrintStream infoStream, Directory directory, String segmentName, FieldInfos fieldInfos, - String docStoreSegmentName, int numDocs, - int numDocsInStore, int termIndexInterval, - CodecProvider codecs) { + int numDocs, int termIndexInterval, CodecProvider codecs) { this.infoStream = infoStream; this.directory = directory; this.segmentName = segmentName; this.fieldInfos = fieldInfos; - this.docStoreSegmentName = docStoreSegmentName; this.numDocs = numDocs; - this.numDocsInStore = numDocsInStore; this.termIndexInterval = termIndexInterval; this.codec = codecs.getWriter(this); flushedFiles = new HashSet(); Index: src/java/org/apache/lucene/index/DocConsumer.java =================================================================== --- src/java/org/apache/lucene/index/DocConsumer.java (revision 966818) +++ src/java/org/apache/lucene/index/DocConsumer.java (working copy) @@ -20,7 +20,8 @@ import java.io.IOException; abstract class DocConsumer { - abstract DocumentsWriterPerThread.DocWriter processDocument() throws IOException; + abstract void processDocument() throws IOException; + abstract void finishDocument() throws IOException; abstract void flush(final SegmentWriteState state) throws IOException; abstract void closeDocStore(final SegmentWriteState state) throws IOException; abstract void abort(); Index: src/java/org/apache/lucene/index/DocInverter.java =================================================================== --- src/java/org/apache/lucene/index/DocInverter.java (revision 966818) +++ src/java/org/apache/lucene/index/DocInverter.java (working copy) @@ -95,11 +95,11 @@ } @Override - public DocumentsWriterPerThread.DocWriter finishDocument() throws IOException { + public void finishDocument() throws IOException { // TODO: allow endConsumer.finishDocument to also return // a DocWriter endConsumer.finishDocument(); - return consumer.finishDocument(); + consumer.finishDocument(); }
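A minimal sketch (not taken from the patch; trailing arguments copied from the readerPool hunk above) of how the merge path simplifies once doc stores are always private to a segment: the second argument of readerPool.get, previously merge.mergeDocStores, is now simply true, and the shared-doc-store bookkeeping removed here is no longer needed.

    // before: readerPool.get(info, merge.mergeDocStores, MERGE_READ_BUFFER_SIZE, ...)
    // after: doc stores are always opened for the merge readers
    SegmentReader reader = readerPool.get(info, true, MERGE_READ_BUFFER_SIZE, -config.getReaderTermsIndexDivisor());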